// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};

use base;
use builder::{Builder, MemFlags};
use callee;
use common::{self, IntPredicate, RealPredicate};
use monomorphize;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;

use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods};

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
    pub fn codegen_rvalue(&mut self,
                          bx: Builder<'a, 'll, 'tcx>,
                          dest: PlaceRef<'tcx, &'ll Value>,
                          rvalue: &mir::Rvalue<'tcx>)
                          -> Builder<'a, 'll, 'tcx>
    {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
               dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&bx, operand);
                // FIXME: consider not copying constants through stack. (fixable by codegenning
                // constants into OperandValue::Ref; why don't we do that yet if we don't?)
                cg_operand.val.store(&bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if dest.layout.is_llvm_scalar_pair() {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
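                    // For example (illustrative): `&[u8; 4] as &[u8]` turns the thin
                    // data pointer into a (data, length) scalar pair, and casting
                    // `Box<T>` to `Box<dyn Trait>` pairs the data pointer with a vtable.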
                    let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&bx, source);
                match operand.val {
                    OperandValue::Pair(..) |
                    OperandValue::Immediate(_) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
                        scratch.storage_live(&bx);
                        operand.val.store(&bx, scratch);
                        base::coerce_unsized_into(&bx, scratch, dest);
                        scratch.storage_dead(&bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized(llref, operand.layout, align);
                        base::coerce_unsized_into(&bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue")
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let size = bx.cx().const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = base::from_immediate(&bx, v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }
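
                // For instance (illustrative): `[0u64; 64]` and `[42u8; 64]` are both
                // lowered to a single `llvm.memset.p0i8.*` call (over 512 and 64 bytes
                // respectively) instead of a 64-iteration store loop.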

                let count = bx.cx().const_usize(count);
                let end = dest.project_index(&bx, count).llval;

                let header_bx = bx.build_sibling_block("repeat_loop_header");
                let body_bx = bx.build_sibling_block("repeat_loop_body");
                let next_bx = bx.build_sibling_block("repeat_loop_next");
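
                // Schematically, the emitted loop looks like (illustrative IR):
                //   br %repeat_loop_header
                // repeat_loop_header:
                //   %current = phi [ %start, %entry ], [ %next, %repeat_loop_body ]
                //   %keep_going = icmp ne %current, %end
                //   br %keep_going, label %repeat_loop_body, label %repeat_loop_next
                // repeat_loop_body:
                //   ; store the element to %current
                //   %next = getelementptr inbounds %current, 1
                //   br label %repeat_loop_header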

                bx.br(header_bx.llbb());
                let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]);

                let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
                header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

                cg_elem.val.store(&body_bx,
                    PlaceRef::new_sized(current, cg_elem.layout, dest.align));

                let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
                body_bx.br(header_bx.llbb());
                header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

                next_bx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None)
                };
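                // For example (illustrative): building `Some(x)` first sets the
                // discriminant, then stores `x` into field 0 of the downcast variant;
                // for a union, `active_field_index` is `Some(_)` and only that one
                // field is written below.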
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        op.val.store(&bx, dest.project_field(&bx, field_index));
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue));
                let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(&mut self,
                                  bx: Builder<'a, 'll, 'tcx>,
                                  indirect_dest: PlaceRef<'tcx, &'ll Value>,
                                  rvalue: &mir::Rvalue<'tcx>)
                                  -> Builder<'a, 'll, 'tcx>
    {
        debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
               indirect_dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&bx, operand);
                cg_operand.val.store_unsized(&bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than Rvalue::Use"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        bx: Builder<'a, 'll, 'tcx>,
        rvalue: &mir::Rvalue<'tcx>
    ) -> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx, &'ll Value>) {
        assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.layout.ty.sty {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") {
                                    bug!("reifying a fn ptr that requires \
                                          const arguments");
                                }
                                OperandValue::Immediate(
                                    callee::resolve_and_get_fn(bx.cx(), def_id, substs))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
                            }
                        }
                    }
                    mir::CastKind::ClosureFnPointer => {
                        match operand.layout.ty.sty {
                            ty::Closure(def_id, substs) => {
                                let instance = monomorphize::resolve_closure(
                                    bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce);
                                OperandValue::Immediate(callee::get_fn(bx.cx(), instance))
                            }
                            _ => {
                                bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        assert!(cast.is_llvm_scalar_pair());
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(lldata,
                                    cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
                                    operand.layout.ty, cast.ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in codegen_rvalue_operand",
                                     operand)
                            }
                        }
                    }
                    mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if cast.is_llvm_scalar_pair() {
                                let data_cast = bx.pointercast(data_ptr,
                                    cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
                                OperandValue::Pair(data_cast, meta)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = cast.immediate_llvm_type(bx.cx());
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        assert!(cast.is_llvm_immediate());
                        let ll_t_out = cast.immediate_llvm_type(bx.cx());
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef {
                                val,
                                layout: cast,
                            });
                        }
                        let r_t_in = CastTy::from_ty(operand.layout.ty)
                            .expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx());
                        match operand.layout.variants {
                            layout::Variants::Single { index } => {
                                if let Some(def) = operand.layout.ty.ty_adt_def() {
                                    let discr_val = def
                                        .discriminant_for_variant(bx.cx().tcx, index)
                                        .val;
                                    let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
                                    return (bx, OperandRef {
                                        val: OperandValue::Immediate(discr),
                                        layout: cast,
                                    });
                                }
                            }
                            layout::Variants::Tagged { .. } |
                            layout::Variants::NicheFilling { .. } => {},
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let layout::Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start &&
                                   scalar.valid_range.end() > scalar.valid_range.start() {
                                    // We want `table[e as usize]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`.
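                                    // For example (illustrative): for
                                    // `#[repr(u8)] enum E { A, B, C }` the valid range
                                    // is 0..=2, so `e as u32` emits `assume(e <= 2)`,
                                    // letting LLVM drop the bounds check in
                                    // `table[e as usize]` for a 3-element table.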

                                    base::call_assume(&bx, bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
                                    ));
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, signed)
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Float) =>
                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef {
                    val,
                    layout: cast
                })
            }

            mir::Rvalue::Ref(_, bk, ref place) => {
                let cg_place = self.codegen_place(&bx, place);

                let ty = cg_place.layout.ty;

                // Note: places are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let val = if !bx.cx().type_has_metadata(ty) {
                    OperandValue::Immediate(cg_place.llval)
                } else {
                    OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
                };
                (bx, OperandRef {
                    val,
                    layout: self.cx.layout_of(self.cx.tcx.mk_ref(
                        self.cx.tcx.types.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
                    )),
                })
            }

            mir::Rvalue::Len(ref place) => {
                let size = self.evaluate_array_len(&bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&bx, lhs);
                let rhs = self.codegen_operand(&bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (OperandValue::Pair(lhs_addr, lhs_extra),
                     OperandValue::Pair(rhs_addr, rhs_extra)) => {
                        self.codegen_fat_ptr_binop(&bx, op,
                                                   lhs_addr, lhs_extra,
                                                   rhs_addr, rhs_extra,
                                                   lhs.layout.ty)
                    }

                    (OperandValue::Immediate(lhs_val),
                     OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!()
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(
                        op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&bx, lhs);
                let rhs = self.codegen_operand(&bx, rhs);
                let result = self.codegen_scalar_checked_binop(&bx, op,
                                                               lhs.immediate(), rhs.immediate(),
                                                               lhs.layout.ty);
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    layout: bx.cx().layout_of(operand_ty)
                };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bx.fneg(lloperand)
                    } else {
                        bx.neg(lloperand)
                    }
                };
                (bx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    layout: operand.layout,
                })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
                let discr = self.codegen_place(&bx, place)
                    .codegen_get_discr(&bx, discr_ty);
                (bx, OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty)
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes());
                let tcx = self.cx.tcx;
                (bx, OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(tcx.types.usize),
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                let (size, align) = bx.cx().size_and_align_of(content_ty);
                let llsize = bx.cx().const_usize(size.bytes());
                let llalign = bx.cx().const_usize(align.abi());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = box_layout.llvm_type(bx.cx());

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = callee::get_fn(bx.cx(), instance);
                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);

                let operand = OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: box_layout,
                };
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx);
                (bx, OperandRef::new_zst(self.cx,
                    self.cx.layout_of(self.monomorphize(&ty))))
            }
        }
    }

    fn evaluate_array_len(
        &mut self,
        bx: &Builder<'a, 'll, 'tcx>,
        place: &mir::Place<'tcx>,
    ) -> &'ll Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if Local is operand.
        if let mir::Place::Local(index) = *place {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.sty {
                    let n = n.unwrap_usize(bx.cx().tcx);
                    return bx.cx().const_usize(n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let cg_value = self.codegen_place(&bx, place);
        return cg_value.len(bx.cx());
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &Builder<'a, 'll, 'tcx>,
        op: mir::BinOp,
        lhs: &'ll Value,
        rhs: &'ll Value,
        input_ty: Ty<'tcx>,
    ) -> &'ll Value {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        let is_unit = input_ty.is_unit();
        match op {
            mir::BinOp::Add => if is_float {
                bx.fadd(lhs, rhs)
            } else {
                bx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bx.fsub(lhs, rhs)
            } else {
                bx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bx.fmul(lhs, rhs)
            } else {
                bx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bx.fdiv(lhs, rhs)
            } else if is_signed {
                bx.sdiv(lhs, rhs)
            } else {
                bx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bx.frem(lhs, rhs)
            } else if is_signed {
                bx.srem(lhs, rhs)
            } else {
                bx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
                bx.cx().const_bool(match op {
                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                    _ => unreachable!()
                })
            } else if is_float {
                bx.fcmp(
                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                    lhs, rhs
                )
            } else {
                bx.icmp(
                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                    lhs, rhs
                )
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &Builder<'a, 'll, 'tcx>,
        op: mir::BinOp,
        lhs_addr: &'ll Value,
        lhs_extra: &'ll Value,
        rhs_addr: &'ll Value,
        rhs_extra: &'ll Value,
        _input_ty: Ty<'tcx>,
    ) -> &'ll Value {
        match op {
            mir::BinOp::Eq => {
                bx.and(
                    bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
                    bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Ne => {
                bx.or(
                    bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
                    bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Le | mir::BinOp::Lt |
            mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
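                // For example (illustrative): for fat pointers to slices this
                // compares the (address, length) pair lexicographically with
                // unsigned predicates:
                //   a < b  ~  a.0 <u b.0 | (a.0 == b.0 & a.1 <u b.1)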
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };

                bx.or(
                    bx.icmp(strict_op, lhs_addr, rhs_addr),
                    bx.and(
                        bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
                        bx.icmp(op, lhs_extra, rhs_extra)
                    )
                )
            }
669 bug!("unexpected fat ptr binop");

    pub fn codegen_scalar_checked_binop(&mut self,
                                        bx: &Builder<'a, 'll, 'tcx>,
                                        op: mir::BinOp,
                                        lhs: &'ll Value,
                                        rhs: &'ll Value,
                                        input_ty: Ty<'tcx>) -> OperandValue<&'ll Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
                let res = bx.call(intrinsic, &[lhs, rhs], None);

                (bx.extract_value(res, 0),
                 bx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
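                // For example (illustrative): for a 32-bit shift, `invert_mask`
                // is `!31`; any shift amount with bits set outside 0..=31 is an
                // overflowing shift, so `of = (rhs & !31) != 0`.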
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }

    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx);
                let ty = self.monomorphize(&ty);
                self.cx.layout_of(ty).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

fn get_overflow_intrinsic(
    oop: OverflowOp,
    bx: &Builder<'_, 'll, '_>,
    ty: Ty
) -> &'ll Value {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{Int, Uint};

    let tcx = bx.tcx();

    let new_sty = match ty.sty {
        Int(Isize) => Int(tcx.sess.target.isize_ty),
        Uint(Usize) => Uint(tcx.sess.target.usize_ty),
        ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            Int(I8) => "llvm.sadd.with.overflow.i8",
            Int(I16) => "llvm.sadd.with.overflow.i16",
            Int(I32) => "llvm.sadd.with.overflow.i32",
            Int(I64) => "llvm.sadd.with.overflow.i64",
            Int(I128) => "llvm.sadd.with.overflow.i128",

            Uint(U8) => "llvm.uadd.with.overflow.i8",
            Uint(U16) => "llvm.uadd.with.overflow.i16",
            Uint(U32) => "llvm.uadd.with.overflow.i32",
            Uint(U64) => "llvm.uadd.with.overflow.i64",
            Uint(U128) => "llvm.uadd.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            Int(I8) => "llvm.ssub.with.overflow.i8",
            Int(I16) => "llvm.ssub.with.overflow.i16",
            Int(I32) => "llvm.ssub.with.overflow.i32",
            Int(I64) => "llvm.ssub.with.overflow.i64",
            Int(I128) => "llvm.ssub.with.overflow.i128",

            Uint(U8) => "llvm.usub.with.overflow.i8",
            Uint(U16) => "llvm.usub.with.overflow.i16",
            Uint(U32) => "llvm.usub.with.overflow.i32",
            Uint(U64) => "llvm.usub.with.overflow.i64",
            Uint(U128) => "llvm.usub.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            Int(I8) => "llvm.smul.with.overflow.i8",
            Int(I16) => "llvm.smul.with.overflow.i16",
            Int(I32) => "llvm.smul.with.overflow.i32",
            Int(I64) => "llvm.smul.with.overflow.i64",
            Int(I128) => "llvm.smul.with.overflow.i128",

            Uint(U8) => "llvm.umul.with.overflow.i8",
            Uint(U16) => "llvm.umul.with.overflow.i16",
            Uint(U32) => "llvm.umul.with.overflow.i32",
            Uint(U64) => "llvm.umul.with.overflow.i64",
            Uint(U128) => "llvm.umul.with.overflow.i128",

            _ => unreachable!(),
        },
    };

    bx.cx().get_intrinsic(&name)
}
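
// For example (illustrative): a checked `i32` addition lowers to
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
//   %val = extractvalue { i32, i1 } %res, 0
//   %of  = extractvalue { i32, i1 } %res, 1
// where %of is the overflow flag paired with the result.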

fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
                     signed: bool,
                     x: &'ll Value,
                     int_ty: &'ll Type,
                     float_ty: &'ll Type) -> &'ll Value {
    // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
    // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
    // LLVM's uitofp produces undef in those cases, so we manually check for that case.
    let is_u128_to_f32 = !signed &&
        bx.cx().int_width(int_ty) == 128 &&
        bx.cx().float_width(float_ty) == 32;
    if is_u128_to_f32 {
        // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
        // and for everything else LLVM's uitofp works just fine.
        use rustc_apfloat::ieee::Single;
        use rustc_apfloat::Float;
        const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
                                            << (Single::MAX_EXP - Single::PRECISION as i16);
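        // Numerically (assuming Single::PRECISION = 24 and Single::MAX_EXP = 127):
        // f32::MAX = (2^24 - 1) * 2^104, and half a ULP at that magnitude is 2^103,
        // so the constant is (2^25 - 1) * 2^103; any u128 >= it rounds to infinity.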
        let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
        let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
        let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
        let infinity = bx.bitcast(infinity_bits, float_ty);
        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
    } else if signed {
        bx.sitofp(x, float_ty)
    } else {
        bx.uitofp(x, float_ty)
    }
}

fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
                     signed: bool,
                     x: &'ll Value,
                     float_ty: &'ll Type,
                     int_ty: &'ll Type) -> &'ll Value {
    let fptosui_result = if signed {
        bx.fptosi(x, int_ty)
    } else {
        bx.fptoui(x, int_ty)
    };

    if !bx.sess().opts.debugging_opts.saturating_float_casts {
        return fptosui_result;
    }
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_ty: &'ll Type| -> u128 {
        let shift_amount = 128 - bx.cx().int_width(int_ty);
        if signed {
            i128::MAX as u128 >> shift_amount
        } else {
            u128::MAX >> shift_amount
        }
    };
    let int_min = |signed: bool, int_ty: &'ll Type| -> i128 {
        if signed {
            i128::MIN >> (128 - bx.cx().int_width(int_ty))
        } else {
            0
        }
    };
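
    // Worked example (illustrative): for i8, shift_amount = 120, so
    // int_max(true, i8) = (2^127 - 1) >> 120 = 127 and
    // int_min(true, i8) = -2^127 >> 120 = -128 (arithmetic shift);
    // for u8, int_max(false, u8) = u128::MAX >> 120 = 255 and int_min = 0.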

    let compute_clamp_bounds_single = |signed: bool, int_ty: &'ll Type| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_ty: &'ll Type| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let float_bits_to_llval = |bits| {
        let bits_llval = match bx.cx().float_width(float_ty) {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match bx.cx().float_width(float_ty) {
        32 => compute_clamp_bounds_single(signed, int_ty),
        64 => compute_clamp_bounds_double(signed, int_ty),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //     a) int_ty::MIN if x < f_min or x is NaN
    //     b) int_ty::MAX if x > f_max
    //     c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids returning undef, because values in the range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    //
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    // Step 1 was already performed above.

    // Step 2: We use two comparisons and two selects, with %s1 being the result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
    // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
    // becomes int_ty::MIN if x is NaN.
    // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
    // negation, and the negation can be merged into the select. Therefore, it is not necessarily
    // any more expensive than an ordered ("normal") comparison. Whether these optimizations will
    // be performed is ultimately up to the backend, but at least x86 does perform them.
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
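        // For example (illustrative): with saturating casts enabled,
        // `f32::NAN as i32` goes through this select and yields 0.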
        // LLVM has no isNaN predicate, so we use (x == x) instead
        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
    } else {
        s1
    }
}