1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::ty::{self, Ty};
13 use rustc::ty::cast::{CastTy, IntTy};
14 use rustc::ty::layout::{Layout, LayoutTyper};
15 use rustc::mir::tcx::LvalueTy;
17 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
18 use rustc_apfloat::{ieee, Float, Status, Round};
19 use rustc_const_math::MAX_F32_PLUS_HALF_ULP;
20 use std::{u128, i128};
25 use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
35 use super::{MirContext, LocalRef};
36 use super::constant::const_scalar_checked_binop;
37 use super::operand::{OperandRef, OperandValue};
38 use super::lvalue::LvalueRef;
40 impl<'a, 'tcx> MirContext<'a, 'tcx> {
// NOTE(review): this listing is elided -- the gaps in the embedded line numbers
// show that interior lines (closing braces, some match arms, `else` branches)
// are missing. Comments below describe only what the visible code establishes.
//
// Translates a MIR rvalue directly into the memory slot `dest` (the "by-ref"
// path). Rvalues that produce scalar/pair operands fall through to
// `trans_rvalue_operand` at the end and the result is then stored into `dest`.
41 pub fn trans_rvalue(&mut self,
42 bcx: Builder<'a, 'tcx>,
43 dest: LvalueRef<'tcx>,
44 rvalue: &mir::Rvalue<'tcx>)
47 debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
48 Value(dest.llval), rvalue);
// Plain `Use`: translate the operand and copy it into the destination slot.
51 mir::Rvalue::Use(ref operand) => {
52 let tr_operand = self.trans_operand(&bcx, operand);
53 // FIXME: consider not copying constants through stack. (fixable by translating
54 // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
55 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
// Unsizing coercion (e.g. &[T; N] -> &[T], &T -> &Trait).
59 mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
60 let cast_ty = self.monomorphize(&cast_ty);
62 if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
63 // into-coerce of a thin pointer to a fat pointer - just
64 // use the operand path.
65 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
66 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
70 // Unsize of a nontrivial struct. I would prefer for
71 // this to be eliminated by MIR translation, but
72 // `CoerceUnsized` can be passed by a where-clause,
73 // so the (generic) MIR may not be able to expand it.
74 let operand = self.trans_operand(&bcx, source);
75 let operand = operand.pack_if_pair(&bcx);
76 let llref = match operand.val {
77 OperandValue::Pair(..) => bug!(),
78 OperandValue::Immediate(llval) => {
79 // unsize from an immediate structure. We don't
80 // really need a temporary alloca here, but
81 // avoiding it would require us to have
82 // `coerce_unsized_into` use extractvalue to
83 // index into the struct, and this case isn't
84 // important enough for it.
85 debug!("trans_rvalue: creating ugly alloca");
86 let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
87 base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
90 OperandValue::Ref(llref, align) => {
91 LvalueRef::new_sized_ty(llref, operand.ty, align)
94 base::coerce_unsized_into(&bcx, &llref, &dest);
// `[elem; count]` array repeat expressions.
98 mir::Rvalue::Repeat(ref elem, count) => {
99 let dest_ty = dest.ty.to_ty(bcx.tcx());
101 // No need to initialize memory of a zero-sized slice
102 if common::type_is_zero_size(bcx.ccx, dest_ty) {
106 let tr_elem = self.trans_operand(&bcx, elem);
107 let size = count.as_u64();
108 let size = C_usize(bcx.ccx, size);
109 let base = base::get_dataptr(&bcx, dest.llval);
110 let align = dest.alignment.to_align();
// Fast paths: a memset covers all-zero fills and byte-element fills.
112 if let OperandValue::Immediate(v) = tr_elem.val {
113 // Use llvm.memset.p0i8.* to initialize all zero arrays
114 if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
115 let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
116 let align = C_i32(bcx.ccx, align as i32);
117 let ty = type_of::type_of(bcx.ccx, dest_ty);
118 let size = machine::llsize_of(bcx.ccx, ty);
119 let fill = C_uint(Type::i8(bcx.ccx), 0);
120 base::call_memset(&bcx, base, fill, size, align, false);
124 // Use llvm.memset.p0i8.* to initialize byte arrays
125 if common::val_ty(v) == Type::i8(bcx.ccx) {
126 let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
127 let align = C_i32(bcx.ccx, align as i32);
128 base::call_memset(&bcx, base, v, size, align, false);
// General case: emit a loop that stores the element into every slot.
133 tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
134 self.store_operand(bcx, llslot, align, tr_elem);
// Aggregate construction: ADT variants vs. tuples/closures/arrays.
139 mir::Rvalue::Aggregate(ref kind, ref operands) => {
141 mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
142 let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index)
143 .to_u128_unchecked() as u64;
144 let dest_ty = dest.ty.to_ty(bcx.tcx());
145 adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr);
146 for (i, operand) in operands.iter().enumerate() {
147 let op = self.trans_operand(&bcx, operand);
148 // Do not generate stores and GEPs for zero-sized fields.
149 if !common::type_is_zero_size(bcx.ccx, op.ty) {
150 let mut val = LvalueRef::new_sized(
151 dest.llval, dest.ty, dest.alignment);
// `active_field_index` is Some for union-like initialization of a
// single field; otherwise fields are filled positionally.
152 let field_index = active_field_index.unwrap_or(i);
153 val.ty = LvalueTy::Downcast {
155 substs: self.monomorphize(&substs),
158 let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
159 self.store_operand(&bcx, lldest_i, align.to_align(), op);
164 // If this is a tuple or closure, we need to translate GEP indices.
165 let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
166 let get_memory_index = |i| {
167 if let Layout::Univariant { ref variant, .. } = *layout {
168 adt::struct_llfields_index(variant, i)
173 let alignment = dest.alignment;
174 for (i, operand) in operands.iter().enumerate() {
175 let op = self.trans_operand(&bcx, operand);
176 // Do not generate stores and GEPs for zero-sized fields.
177 if !common::type_is_zero_size(bcx.ccx, op.ty) {
178 // Note: perhaps this should be StructGep, but
179 // note that in some cases the values here will
180 // not be structs but arrays.
181 let i = get_memory_index(i);
182 let dest = bcx.gepi(dest.llval, &[0, i]);
183 self.store_operand(&bcx, dest, alignment.to_align(), op);
// Fallback: any other rvalue must be operand-producing; translate it as
// an operand and store the result into `dest`.
192 assert!(self.rvalue_creates_operand(rvalue));
193 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
194 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
// NOTE(review): elided listing -- interior lines are missing (see the gaps in
// the embedded line numbers); comments describe only the visible code.
//
// Translates an rvalue that yields an `OperandRef` (immediate, pair, or ZST)
// rather than writing through a destination pointer. Returns the possibly
// advanced builder together with the operand. Callers must have checked
// `rvalue_creates_operand` first (asserted below).
200 pub fn trans_rvalue_operand(&mut self,
201 bcx: Builder<'a, 'tcx>,
202 rvalue: &mir::Rvalue<'tcx>)
203 -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
205 assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
// All cast kinds: fn-pointer reifications, unsizing, and scalar `Misc` casts.
208 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
209 let operand = self.trans_operand(&bcx, source);
210 debug!("cast operand is {:?}", operand);
211 let cast_ty = self.monomorphize(&cast_ty);
213 let val = match *kind {
// `fn item -> fn pointer`: resolve the monomorphic instance and take
// its address.
214 mir::CastKind::ReifyFnPointer => {
215 match operand.ty.sty {
216 ty::TyFnDef(def_id, substs) => {
217 OperandValue::Immediate(
218 callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
221 bug!("{} cannot be reified to a fn ptr", operand.ty)
// Non-capturing closure -> fn pointer, via the FnOnce shim.
225 mir::CastKind::ClosureFnPointer => {
226 match operand.ty.sty {
227 ty::TyClosure(def_id, substs) => {
228 let instance = monomorphize::resolve_closure(
229 bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce);
230 OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
233 bug!("{} cannot be cast to a fn ptr", operand.ty)
237 mir::CastKind::UnsafeFnPointer => {
238 // this is a no-op at the LLVM level
241 mir::CastKind::Unsize => {
242 // unsize targets other than to a fat pointer currently
243 // can't be operands.
244 assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));
247 OperandValue::Pair(lldata, llextra) => {
248 // unsize from a fat pointer - this is a
249 // "trait-object-to-supertrait" coercion, for
251 // &'a fmt::Debug+Send => &'a fmt::Debug,
252 // So we need to pointercast the base to ensure
253 // the types match up.
254 let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
255 let lldata = bcx.pointercast(lldata, llcast_ty);
256 OperandValue::Pair(lldata, llextra)
258 OperandValue::Immediate(lldata) => {
// Thin -> fat pointer: compute the metadata (length/vtable) too.
260 let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
261 operand.ty, cast_ty);
262 OperandValue::Pair(lldata, llextra)
264 OperandValue::Ref(..) => {
265 bug!("by-ref operand {:?} in trans_rvalue_operand",
// Misc cast whose *source* is a fat pointer: either fat->fat
// (recast the data pointer, keep metadata) or fat->thin (extract
// and recast just the data pointer).
270 mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
271 let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
272 let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
273 if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
274 if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
275 let ll_cft = ll_cast_ty.field_types();
276 let ll_fft = ll_from_ty.field_types();
277 let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
278 assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
279 OperandValue::Pair(data_cast, meta_ptr)
280 } else { // cast to thin-ptr
281 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
282 // pointer-cast of that pointer to desired pointer type.
283 let llval = bcx.pointercast(data_ptr, ll_cast_ty);
284 OperandValue::Immediate(llval)
287 bug!("Unexpected non-Pair operand")
// Scalar-to-scalar `as` casts: int/float/pointer conversions.
290 mir::CastKind::Misc => {
291 debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
292 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
293 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
294 let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
295 let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
296 let llval = operand.immediate();
297 let l = bcx.ccx.layout_of(operand.ty);
// For C-like enums, the layout records signedness and the value
// range; an `assume` of the range is emitted (elided here) so LLVM
// can drop bounds checks on `table[e as usize]`.
298 let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
300 // We want `table[e as usize]` to not
301 // have bound checks, and this is the most
302 // convenient place to put the `assume`.
304 base::call_assume(&bcx, bcx.icmp(
307 C_uint(common::val_ty(llval), max)
313 operand.ty.is_signed()
// Dispatch to the matching LLVM conversion instruction.
316 let newval = match (r_t_in, r_t_out) {
317 (CastTy::Int(_), CastTy::Int(_)) => {
318 bcx.intcast(llval, ll_t_out, signed)
320 (CastTy::Float, CastTy::Float) => {
321 let srcsz = ll_t_in.float_width();
322 let dstsz = ll_t_out.float_width();
324 bcx.fpext(llval, ll_t_out)
325 } else if srcsz > dstsz {
326 bcx.fptrunc(llval, ll_t_out)
331 (CastTy::Ptr(_), CastTy::Ptr(_)) |
332 (CastTy::FnPtr, CastTy::Ptr(_)) |
333 (CastTy::RPtr(_), CastTy::Ptr(_)) =>
334 bcx.pointercast(llval, ll_t_out),
335 (CastTy::Ptr(_), CastTy::Int(_)) |
336 (CastTy::FnPtr, CastTy::Int(_)) =>
337 bcx.ptrtoint(llval, ll_t_out),
338 (CastTy::Int(_), CastTy::Ptr(_)) =>
339 bcx.inttoptr(llval, ll_t_out),
340 (CastTy::Int(_), CastTy::Float) =>
341 cast_int_to_float(&bcx, signed, llval, ll_t_in, ll_t_out),
342 (CastTy::Float, CastTy::Int(IntTy::I)) =>
343 cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
344 (CastTy::Float, CastTy::Int(_)) =>
345 cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
346 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
348 OperandValue::Immediate(newval)
351 let operand = OperandRef {
// `&` / `&mut` borrows: an lvalue's address *is* the reference value.
358 mir::Rvalue::Ref(_, bk, ref lvalue) => {
359 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
361 let ty = tr_lvalue.ty.to_ty(bcx.tcx());
362 let ref_ty = bcx.tcx().mk_ref(
363 bcx.tcx().types.re_erased,
364 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
367 // Note: lvalues are indirect, so storing the `llval` into the
368 // destination effectively creates a reference.
369 let operand = if !bcx.ccx.shared().type_has_metadata(ty) {
371 val: OperandValue::Immediate(tr_lvalue.llval),
376 val: OperandValue::Pair(tr_lvalue.llval,
// `Len`: yields the usize length of an array/slice lvalue.
384 mir::Rvalue::Len(ref lvalue) => {
385 let size = self.evaluate_array_len(&bcx, lvalue);
386 let operand = OperandRef {
387 val: OperandValue::Immediate(size),
388 ty: bcx.tcx().types.usize,
// Binary ops; fat-pointer comparisons take a separate path.
393 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
394 let lhs = self.trans_operand(&bcx, lhs);
395 let rhs = self.trans_operand(&bcx, rhs);
396 let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
397 match (lhs.val, rhs.val) {
398 (OperandValue::Pair(lhs_addr, lhs_extra),
399 OperandValue::Pair(rhs_addr, rhs_extra)) => {
400 self.trans_fat_ptr_binop(&bcx, op,
409 self.trans_scalar_binop(&bcx, op,
410 lhs.immediate(), rhs.immediate(),
413 let operand = OperandRef {
414 val: OperandValue::Immediate(llresult),
415 ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
// Overflow-checked arithmetic: yields a `(value, overflowed)` tuple.
419 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
420 let lhs = self.trans_operand(&bcx, lhs);
421 let rhs = self.trans_operand(&bcx, rhs);
422 let result = self.trans_scalar_checked_binop(&bcx, op,
423 lhs.immediate(), rhs.immediate(),
425 let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
426 let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
427 let operand = OperandRef {
// Unary `!` and `-`; float negation uses a different instruction.
435 mir::Rvalue::UnaryOp(op, ref operand) => {
436 let operand = self.trans_operand(&bcx, operand);
437 let lloperand = operand.immediate();
438 let is_float = operand.ty.is_fp();
439 let llval = match op {
440 mir::UnOp::Not => bcx.not(lloperand),
441 mir::UnOp::Neg => if is_float {
448 val: OperandValue::Immediate(llval),
// Read an enum's discriminant as an immediate of the cast target type.
453 mir::Rvalue::Discriminant(ref lvalue) => {
454 let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
455 let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
456 let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
457 let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
458 let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
459 discr_lvalue.alignment, Some(discr_type), true);
461 val: OperandValue::Immediate(discr),
// `size_of::<T>()` folded to a constant at translation time.
466 mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
467 assert!(bcx.ccx.shared().type_is_sized(ty));
468 let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty));
471 val: OperandValue::Immediate(val),
// `box EXPR` allocation: call the `exchange_malloc` lang item with the
// content type's size and alignment, then cast the raw pointer.
476 mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
477 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
478 let llty = type_of::type_of(bcx.ccx, content_ty);
479 let llsize = machine::llsize_of(bcx.ccx, llty);
480 let align = bcx.ccx.align_of(content_ty);
481 let llalign = C_usize(bcx.ccx, align as u64);
482 let llty_ptr = llty.ptr_to();
483 let box_ty = bcx.tcx().mk_box(content_ty);
486 let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
489 bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
492 let instance = ty::Instance::mono(bcx.tcx(), def_id);
493 let r = callee::get_fn(bcx.ccx, instance);
494 let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
496 let operand = OperandRef {
497 val: OperandValue::Immediate(val),
502 mir::Rvalue::Use(ref operand) => {
503 let operand = self.trans_operand(&bcx, operand);
506 mir::Rvalue::Repeat(..) |
507 mir::Rvalue::Aggregate(..) => {
508 // According to `rvalue_creates_operand`, only ZST
509 // aggregate rvalues are allowed to be operands.
510 let ty = rvalue.ty(self.mir, self.ccx.tcx());
511 (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
// Returns the length of the array/slice behind `lvalue` as an LLVM usize
// value. ZST array locals kept as operands are special-cased: their length is
// read straight out of the type, since they have no backing memory.
516 fn evaluate_array_len(&mut self,
517 bcx: &Builder<'a, 'tcx>,
518 lvalue: &mir::Lvalue<'tcx>) -> ValueRef
520 // ZST are passed as operands and require special handling
521 // because trans_lvalue() panics if Local is operand.
522 if let mir::Lvalue::Local(index) = *lvalue {
523 if let LocalRef::Operand(Some(op)) = self.locals[index] {
524 if common::type_is_zero_size(bcx.ccx, op.ty) {
525 if let ty::TyArray(_, n) = op.ty.sty {
// The array length is a compile-time constant; emit it directly.
526 let n = n.val.to_const_int().unwrap().to_u64().unwrap();
527 return common::C_usize(bcx.ccx, n);
532 // use common size calculation for non zero-sized types
533 let tr_value = self.trans_lvalue(&bcx, lvalue);
534 return tr_value.len(bcx.ccx);
// NOTE(review): elided listing -- the non-float/unsigned arms of the
// arithmetic cases and several closing braces are missing here.
//
// Emits the LLVM instruction for an *unchecked* scalar binary op, selecting
// float vs. signed vs. unsigned variants from `input_ty` (the type of the
// operands, not the result -- comparisons yield bool).
537 pub fn trans_scalar_binop(&mut self,
538 bcx: &Builder<'a, 'tcx>,
542 input_ty: Ty<'tcx>) -> ValueRef {
543 let is_float = input_ty.is_fp();
544 let is_signed = input_ty.is_signed();
545 let is_nil = input_ty.is_nil();
546 let is_bool = input_ty.is_bool();
548 mir::BinOp::Add => if is_float {
553 mir::BinOp::Sub => if is_float {
558 mir::BinOp::Mul => if is_float {
563 mir::BinOp::Div => if is_float {
565 } else if is_signed {
570 mir::BinOp::Rem => if is_float {
572 } else if is_signed {
577 mir::BinOp::BitOr => bcx.or(lhs, rhs),
578 mir::BinOp::BitAnd => bcx.and(lhs, rhs),
579 mir::BinOp::BitXor => bcx.xor(lhs, rhs),
// `Offset` is raw-pointer arithmetic; shifts mask the shift amount to
// avoid LLVM undefined behavior (handled in the common helpers).
580 mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]),
581 mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
582 mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
583 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
584 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
// Comparisons on `()` are constant-folded: () == () is always true.
585 C_bool(bcx.ccx, match op {
586 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
587 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
592 base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
596 let (lhs, rhs) = if is_bool {
597 // FIXME(#36856) -- extend the bools into `i8` because
598 // LLVM's i1 comparisons are broken.
599 (bcx.zext(lhs, Type::i8(bcx.ccx)),
600 bcx.zext(rhs, Type::i8(bcx.ccx)))
606 base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
// NOTE(review): elided listing -- the parameter list and several `and`/`or`
// combinator lines are missing.
//
// Emits comparisons between fat (two-word) pointers by comparing both the
// data pointer and the metadata word. Only comparison operators are valid
// here; anything else hits the bug!() at the bottom.
613 pub fn trans_fat_ptr_binop(&mut self,
614 bcx: &Builder<'a, 'tcx>,
// Eq: both words equal. Ne: either word differs.
625 bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
626 bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
631 bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
632 bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
635 mir::BinOp::Le | mir::BinOp::Lt |
636 mir::BinOp::Ge | mir::BinOp::Gt => {
637 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
// Lexicographic ordering: a strict comparison on the data pointer
// wins outright; on ties, the metadata decides with the original op.
638 let (op, strict_op) = match op {
639 mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
640 mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
641 mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
642 mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
647 bcx.icmp(strict_op, lhs_addr, rhs_addr),
649 bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
650 bcx.icmp(op, lhs_extra, rhs_extra)
655 bug!("unexpected fat ptr binop");
// NOTE(review): elided listing -- the parameter list and some closing braces
// are missing.
//
// Emits an overflow-checked binary op, returning an OperandValue::Pair of
// (result, overflow-flag). Falls back to the unchecked op with a constant
// `false` flag when overflow checks are globally disabled.
660 pub fn trans_scalar_checked_binop(&mut self,
661 bcx: &Builder<'a, 'tcx>,
665 input_ty: Ty<'tcx>) -> OperandValue {
666 // This case can currently arise only from functions marked
667 // with #[rustc_inherit_overflow_checks] and inlined from
668 // another crate (mostly core::num generic/#[inline] fns),
669 // while the current crate doesn't use overflow checks.
670 if !bcx.ccx.check_overflow() {
671 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
672 return OperandValue::Pair(val, C_bool(bcx.ccx, false));
675 // First try performing the operation on constants, which
676 // will only succeed if both operands are constant.
677 // This is necessary to determine when an overflow Assert
678 // will always panic at runtime, and produce a warning.
679 if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
680 return OperandValue::Pair(val, C_bool(bcx.ccx, of));
683 let (val, of) = match op {
684 // These are checked using intrinsics
685 mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
687 mir::BinOp::Add => OverflowOp::Add,
688 mir::BinOp::Sub => OverflowOp::Sub,
689 mir::BinOp::Mul => OverflowOp::Mul,
// The llvm.*.with.overflow intrinsics return a {result, i1} pair.
692 let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
693 let res = bcx.call(intrinsic, &[lhs, rhs], None);
695 (bcx.extract_value(res, 0),
696 bcx.extract_value(res, 1))
698 mir::BinOp::Shl | mir::BinOp::Shr => {
// Shifts overflow when the shift amount has bits set outside the
// valid range [0, bitwidth); test those bits against a mask.
699 let lhs_llty = val_ty(lhs);
700 let rhs_llty = val_ty(rhs);
701 let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
702 let outer_bits = bcx.and(rhs, invert_mask);
704 let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
705 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
710 bug!("Operator `{:?}` is not a checkable operator", op)
714 OperandValue::Pair(val, of)
// Predicate used by trans_rvalue / trans_rvalue_operand to decide whether an
// rvalue can be produced as an OperandRef (true) or must be written directly
// into destination memory (false). Repeat/Aggregate qualify only when the
// resulting type is zero-sized.
717 pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
719 mir::Rvalue::Ref(..) |
720 mir::Rvalue::Len(..) |
721 mir::Rvalue::Cast(..) | // (*)
722 mir::Rvalue::BinaryOp(..) |
723 mir::Rvalue::CheckedBinaryOp(..) |
724 mir::Rvalue::UnaryOp(..) |
725 mir::Rvalue::Discriminant(..) |
726 mir::Rvalue::NullaryOp(..) |
727 mir::Rvalue::Use(..) => // (*)
729 mir::Rvalue::Repeat(..) |
730 mir::Rvalue::Aggregate(..) => {
731 let ty = rvalue.ty(self.mir, self.ccx.tcx());
732 let ty = self.monomorphize(&ty);
733 common::type_is_zero_size(self.ccx, ty)
737 // (*) this is only true if the type is suitable
741 #[derive(Copy, Clone)]
// Maps an OverflowOp plus an integer type to the matching
// llvm.{s,u}{add,sub,mul}.with.overflow.iN intrinsic. isize/usize are first
// normalized to the fixed-width type matching the target's pointer width.
// NOTE(review): elided listing -- the per-width normalization arms and some
// closing braces are missing.
746 fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
747 use syntax::ast::IntTy::*;
748 use syntax::ast::UintTy::*;
749 use rustc::ty::{TyInt, TyUint};
// Normalize isize/usize to a concrete width from the target spec.
753 let new_sty = match ty.sty {
754 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
758 _ => panic!("unsupported target word size")
760 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
764 _ => panic!("unsupported target word size")
766 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
767 _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
// Select the intrinsic name: signedness comes from the type, the
// operation from `oop`, and the width suffix from the concrete type.
770 let name = match oop {
771 OverflowOp::Add => match new_sty {
772 TyInt(I8) => "llvm.sadd.with.overflow.i8",
773 TyInt(I16) => "llvm.sadd.with.overflow.i16",
774 TyInt(I32) => "llvm.sadd.with.overflow.i32",
775 TyInt(I64) => "llvm.sadd.with.overflow.i64",
776 TyInt(I128) => "llvm.sadd.with.overflow.i128",
778 TyUint(U8) => "llvm.uadd.with.overflow.i8",
779 TyUint(U16) => "llvm.uadd.with.overflow.i16",
780 TyUint(U32) => "llvm.uadd.with.overflow.i32",
781 TyUint(U64) => "llvm.uadd.with.overflow.i64",
782 TyUint(U128) => "llvm.uadd.with.overflow.i128",
786 OverflowOp::Sub => match new_sty {
787 TyInt(I8) => "llvm.ssub.with.overflow.i8",
788 TyInt(I16) => "llvm.ssub.with.overflow.i16",
789 TyInt(I32) => "llvm.ssub.with.overflow.i32",
790 TyInt(I64) => "llvm.ssub.with.overflow.i64",
791 TyInt(I128) => "llvm.ssub.with.overflow.i128",
793 TyUint(U8) => "llvm.usub.with.overflow.i8",
794 TyUint(U16) => "llvm.usub.with.overflow.i16",
795 TyUint(U32) => "llvm.usub.with.overflow.i32",
796 TyUint(U64) => "llvm.usub.with.overflow.i64",
797 TyUint(U128) => "llvm.usub.with.overflow.i128",
801 OverflowOp::Mul => match new_sty {
802 TyInt(I8) => "llvm.smul.with.overflow.i8",
803 TyInt(I16) => "llvm.smul.with.overflow.i16",
804 TyInt(I32) => "llvm.smul.with.overflow.i32",
805 TyInt(I64) => "llvm.smul.with.overflow.i64",
806 TyInt(I128) => "llvm.smul.with.overflow.i128",
808 TyUint(U8) => "llvm.umul.with.overflow.i8",
809 TyUint(U16) => "llvm.umul.with.overflow.i16",
810 TyUint(U32) => "llvm.umul.with.overflow.i32",
811 TyUint(U64) => "llvm.umul.with.overflow.i64",
812 TyUint(U128) => "llvm.umul.with.overflow.i128",
818 bcx.ccx.get_intrinsic(&name)
// Emits an int -> float conversion (sitofp/uitofp). The one hazardous case,
// u128 -> f32, is handled specially when -Z saturating-float-casts is on:
// inputs that would round to above f32::MAX are mapped to +infinity instead
// of LLVM's undef.
// NOTE(review): elided listing -- some parameters and the `else` scaffolding
// between lines 837 and 840/842 are missing.
821 fn cast_int_to_float(bcx: &Builder,
825 float_ty: Type) -> ValueRef {
826 // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
827 // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
828 // LLVM's uitofp produces undef in those cases, so we manually check for that case.
829 let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
830 if is_u128_to_f32 && bcx.sess().opts.debugging_opts.saturating_float_casts {
831 // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
832 // and for everything else LLVM's uitofp works just fine.
833 let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP);
834 let overflow = bcx.icmp(llvm::IntUGE, x, max);
835 let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
836 let infinity = consts::bitcast(infinity_bits, float_ty);
// select(overflow, +inf, uitofp(x)): the uitofp result is only used when
// it is well-defined.
837 bcx.select(overflow, infinity, bcx.uitofp(x, float_ty))
840 bcx.sitofp(x, float_ty)
842 bcx.uitofp(x, float_ty)
// Emits a float -> int conversion (fptosi/fptoui). When
// -Z saturating-float-casts is enabled, layers a saturating clamp on top so
// that out-of-range inputs yield int::MIN/int::MAX and NaN yields 0, instead
// of LLVM's undef (see issue #10184). The long comments below are original
// and explain the clamp-bound derivation and the select chain.
// NOTE(review): elided listing -- some parameters, helper tails, and closing
// braces are missing.
847 fn cast_float_to_int(bcx: &Builder,
851 int_ty: Type) -> ValueRef {
// Step 1: the raw (possibly-undef) conversion.
852 let fptosui_result = if signed {
853 bcx.fptosi(x, int_ty)
855 bcx.fptoui(x, int_ty)
858 if !bcx.sess().opts.debugging_opts.saturating_float_casts {
859 return fptosui_result;
861 // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
862 // destination integer type after rounding towards zero. This `undef` value can cause UB in
863 // safe code (see issue #10184), so we implement a saturating conversion on top of it:
864 // Semantically, the mathematical value of the input is rounded towards zero to the next
865 // mathematical integer, and then the result is clamped into the range of the destination
866 // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
867 // the destination integer type. NaN is mapped to 0.
869 // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
870 // a value representable in int_ty.
871 // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
872 // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
873 // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
874 // representable. Note that this only works if float_ty's exponent range is sufficiently large.
875 // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
876 // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
877 // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
878 // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
879 // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
880 fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: Type) -> (u128, u128) {
881 let f_min = if signed {
882 let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
883 assert_eq!(rounded_min.status, Status::OK);
889 let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
890 assert!(rounded_max.value.is_finite());
892 (f_min.to_bits(), rounded_max.value.to_bits())
// Largest value representable in int_ty, as a u128.
894 fn int_max(signed: bool, int_ty: Type) -> u128 {
895 let shift_amount = 128 - int_ty.int_width();
897 i128::MAX as u128 >> shift_amount
899 u128::MAX >> shift_amount
// Smallest value representable in int_ty (0 for unsigned; arithmetic
// shift preserves the sign for signed).
902 fn int_min(signed: bool, int_ty: Type) -> i128 {
904 i128::MIN >> (128 - int_ty.int_width())
909 let (f_min, f_max) = match float_ty.float_width() {
910 32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
911 64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
912 n => bug!("unsupported float width {}", n),
// Materialize the clamp bounds as LLVM float constants via bit patterns.
914 let float_bits_to_llval = |bits| {
915 let bits_llval = match float_ty.float_width() {
916 32 => C_u32(bcx.ccx, bits as u32),
917 64 => C_u64(bcx.ccx, bits as u64),
918 n => bug!("unsupported float width {}", n),
920 consts::bitcast(bits_llval, float_ty)
922 let f_min = float_bits_to_llval(f_min);
923 let f_max = float_bits_to_llval(f_max);
924 // To implement saturation, we perform the following steps:
926 // 1. Cast x to an integer with fpto[su]i. This may result in undef.
927 // 2. Compare x to f_min and f_max, and use the comparison results to select:
928 // a) int_ty::MIN if x < f_min or x is NaN
929 // b) int_ty::MAX if x > f_max
930 // c) the result of fpto[su]i otherwise
931 // 3. If x is NaN, return 0.0, otherwise return the result of step 2.
933 // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
934 // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
935 // undef does not introduce any non-determinism either.
936 // More importantly, the above procedure correctly implements saturating conversion.
938 // If x is NaN, 0 is trivially returned.
939 // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
940 // This yields three cases to consider:
941 // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
942 // saturating conversion for inputs in that range.
943 // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
944 // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
945 // than int_ty::MAX. Because x is larger than int_ty::MAX, the return value is correct.
946 // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
947 // int_ty::MIN and therefore the return value of int_ty::MIN is immediately correct.
950 // Step 1 was already performed above.
952 // Step 2: We use two comparisons and two selects, with s1 being the result:
953 // %less = fcmp ult %x, %f_min
954 // %greater = fcmp olt %x, %f_max
955 // %s0 = select %less, int_ty::MIN, %fptosi_result
956 // %s1 = select %greater, int_ty::MAX, %s0
957 // Note that %less uses an *unordered* comparison. This comparison is true if the operands are
958 // not comparable (i.e., if x is NaN). The unordered comparison ensures that s1 becomes
959 // int_ty::MIN if x is NaN.
960 // Performance note: It can be lowered to a flipped comparison and a negation (and the negation
961 // can be merged into the select), so it is not necessarily any more expensive than an ordered
962 // ("normal") comparison. Whether these optimizations will be performed is ultimately up to the
963 // backend but at least x86 does that.
964 let less = bcx.fcmp(llvm::RealULT, x, f_min);
965 let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
966 let int_max = C_big_integral(int_ty, int_max(signed, int_ty) as u128);
967 let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128);
968 let s0 = bcx.select(less, int_min, fptosui_result);
969 let s1 = bcx.select(greater, int_max, s0);
971 // Step 3: NaN replacement.
972 // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
973 // Therefore we only need to execute this step for signed integer types.
975 // LLVM has no isNaN predicate, so we use (x == x) instead
976 bcx.select(bcx.fcmp(llvm::RealOEQ, x, x), s1, C_big_integral(int_ty, 0))