use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
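                // (For instance, a generic `fn coerce<T: CoerceUnsized<U>, U>(t: T) -> U { t }`
                // keeps the coercion abstract in MIR; it can only be expanded here, once
                // `T` and `U` are known.)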
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());
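
                    // For example (sketching the fast paths below): `[0u32; 1024]`
                    // takes the all-zero memset path, `[0x42u8; 1024]` takes the
                    // byte memset path, and any other immediate element falls
                    // through to the per-element loop at the end of this arm.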

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
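                                // E.g. `p as *const u8` where `p: *const [u8]`: the
                                // length metadata is dropped and only the data pointer
                                // survives the cast.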
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end >= scalar.valid_range.start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bound checks, and this is the most
                                    // convenient place to put the `assume`s.
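                                    // E.g. for `#[repr(u8)] enum E { A = 1, B = 2 }` the
                                    // valid range is 1..=2, so we assume `1 <= e && e <= 2`
                                    // and LLVM can elide the bounds check in
                                    // `TABLE[e as usize - 1]`.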
                                    if scalar.valid_range.start > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, scalar.valid_range.start);
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let ty = bx.type_func(&[bx.type_isize(), bx.type_isize()], bx.type_i8p());
                let call = bx.call(ty, r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                    mir::NullOp::Box => unreachable!(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZST are passed as operands and require special handling
        // because codegen_place() panics if Local is operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
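            // The `Offset` arm below implements pointer offset arithmetic:
            // e.g. offsetting a `*const u32` by 3 becomes an `inbounds` GEP
            // with index 3 over the pointee type.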
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
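                // E.g. `<` on two fat pointers with (addr, extra) = (0x1000, 3)
                // and (0x1000, 5): the strict address compare is false, the
                // addresses are equal, and 3 < 5, so the result is true --
                // lexicographic order on the (addr, extra) pair.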
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
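        // (For instance, `core`'s integer `Add` impls are marked
        // `#[rustc_inherit_overflow_checks]`; inlined into a crate compiled
        // with overflow checks disabled, they take the early return below.)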
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);
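
                // E.g. for a 32-bit left-hand side the shift mask is 31, so a
                // shift amount of 34 (0b100010) leaves 0b100000 in `outer_bits`
                // and the comparison below reports an overflow.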
                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };
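
    // A worked example for i8 (int_width = 8): int_max = i128::MAX as u128 >> 120 = 127
    // and int_min = i128::MIN >> 120 = -128. Both are exactly representable in f32 and
    // f64, so the clamp bounds computed below come out as f_min = -128.0 and f_max = 127.0.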

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
    // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
    // undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

    // Step 2: We use two comparisons and two selects, with %s1 being the
    // result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    //
    // Note that %less_or_nan uses an *unordered* comparison. This
    // comparison is true if the operands are not comparable (i.e., if x is
    // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
    // x is NaN.
    //
    // Performance note: Unordered comparison can be lowered to a "flipped"
    // comparison and a negation, and the negation can be merged into the
    // select. Therefore, it is not necessarily any more expensive than an
    // ordered ("normal") comparison. Whether these optimizations will be
    // performed is ultimately up to the backend, but at least x86 does
    // perform them.
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);
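
    // E.g. for a saturating f32 -> u8 cast: 300.0 is greater than f_max = 255.0,
    // so %s1 selects 255; -1.0 (and, via the unordered compare, NaN) is less than
    // f_min = 0.0, so %s0 selects int_ty::MIN = 0.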

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead
        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
        bx.select(cmp, s1, zero)
    } else {
        s1
    }
}