// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::Layout;
use rustc::ty::subst::{Kind, Subst};
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use middle::lang_items::ExchangeMallocFnLangItem;

use base;
use builder::Builder;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint};
use common::{C_integral};
use adt;
use machine;
use type_::Type;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::LvalueRef;

impl<'a, 'tcx> MirContext<'a, 'tcx> {
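    /// Translate `rvalue` and store the result into the already-allocated
    /// destination lvalue `dest`, returning the (possibly new) builder.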
    pub fn trans_rvalue(&mut self,
                        bcx: Builder<'a, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Builder<'a, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
                self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = self.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                let llref = match operand.val {
                    OperandValue::Pair(..) => bug!(),
                    OperandValue::Immediate(llval) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
                        base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
                        scratch
                    }
                    OperandValue::Ref(llref, align) => {
                        LvalueRef::new_sized_ty(llref, operand.ty, align)
                    }
                };
                base::coerce_unsized_into(&bcx, &llref, &dest);
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx, size);
                let base = base::get_dataptr(&bcx, dest.llval);
                tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
                    self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
                })
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
                        let disr = Disr::for_variant(bcx.tcx(), adt_def, variant_index);
                        let dest_ty = dest.ty.to_ty(bcx.tcx());
                        adt::trans_set_discr(&bcx, dest_ty, dest.llval, disr);
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                let mut val = LvalueRef::new_sized(
                                    dest.llval, dest.ty, dest.alignment);
                                let field_index = active_field_index.unwrap_or(i);
                                val.ty = LvalueTy::Downcast {
                                    adt_def: adt_def,
                                    substs: self.monomorphize(&substs),
                                    variant_index: variant_index,
                                };
                                let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
                                self.store_operand(&bcx, lldest_i, align.to_align(), op);
                            }
                        }
                    },
                    _ => {
                        // If this is a tuple or closure, we need to translate GEP indices.
                        let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
                        let translation = if let Layout::Univariant { ref variant, .. } = *layout {
                            Some(&variant.memory_index)
                        } else {
                            None
                        };
                        let alignment = dest.alignment;
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let i = if let Some(ref t) = translation {
                                    t[i] as usize
                                } else {
                                    i
                                };
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, alignment.to_align(), op);
                            }
                        }
                    }
                }
                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                bcx
            }
        }
    }
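
    /// Translate `rvalue` into an `OperandRef` (an immediate value or a pair
    /// of immediates) rather than storing it into memory; only rvalues for
    /// which `rvalue_creates_operand` returns true may be passed here.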
    pub fn trans_rvalue_operand(&mut self,
                                bcx: Builder<'a, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = self.monomorphize(&cast_ty);

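                // Dispatch on the cast kind; each arm below produces the
                // `OperandValue` for the cast result.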
                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx, def_id, substs)
                                        .reify(bcx.ccx))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::ClosureFnPointer => {
                        match operand.ty.sty {
                            ty::TyClosure(def_id, substs) => {
                                // Get the def_id for FnOnce::call_once
                                let fn_once = bcx.tcx().lang_items.fn_once_trait().unwrap();
                                let call_once = bcx.tcx()
                                    .global_tcx().associated_items(fn_once)
                                    .find(|it| it.kind == ty::AssociatedKind::Method)
                                    .unwrap().def_id;
                                // Now create its substs [Closure, Tuple]
                                let input = bcx.tcx().closure_type(def_id)
                                    .subst(bcx.tcx(), substs.substs).input(0);
                                let substs = bcx.tcx().mk_substs([operand.ty, input.skip_binder()]
                                    .iter().cloned().map(Kind::from));
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx, call_once, substs)
                                        .reify(bcx.ccx))
                            }
                            _ => {
                                bug!("{} cannot be cast to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug+Send => &'a fmt::Debug`.
                                // So we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize of a thin pointer.
                                let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
                                    operand.ty, cast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let llval = operand.immediate();
                        let l = bcx.ccx.layout_of(operand.ty);
                        let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
                            if max > min {
                                // We want `table[e as usize]` to not
                                // have bounds checks, and this is the most
                                // convenient place to put the `assume`.
                                base::call_assume(&bcx, bcx.icmp(
                                    llvm::IntULE,
                                    llval,
                                    C_integral(common::val_ty(llval), max, false)
                                ));
                            }

                            signed
                        } else {
                            operand.ty.is_signed()
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                bcx.intcast(llval, ll_t_out, signed)
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if bcx.ccx.shared().type_is_sized(ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

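            // Binary operations: fat pointers are compared component-wise via
            // `trans_fat_ptr_binop`; everything else goes through
            // `trans_scalar_binop`.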
            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            self.trans_fat_ptr_binop(&bcx, op,
                                                     lhs_addr, lhs_extra,
                                                     rhs_addr, rhs_extra,
                                                     lhs.ty)
                        }
                        _ => bug!()
                    }
                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Discriminant(ref lvalue) => {
                let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
                let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
                let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
                let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
                                                 discr_lvalue.alignment, Some(discr_type), true);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(discr),
                    ty: discr_ty
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx, content_ty);
                let llsize = machine::llsize_of(bcx.ccx, llty);
                let align = type_of::align_of(bcx.ccx, content_ty);
                let llalign = C_uint(bcx.ccx, align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
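
                // Allocate space by calling the `exchange_malloc` lang item
                // with the size and alignment of the boxed contents.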
                let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
                    }
                };
                let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
                    .reify(bcx.ccx);
                let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);

                let operand = OperandRef {
                    val: OperandValue::Immediate(val),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }
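
    /// Translate a binary operation on two scalar (immediate) values of type
    /// `input_ty`, returning the resulting LLVM value.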
    pub fn trans_scalar_binop(&mut self,
                              bcx: &Builder<'a, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        let is_nil = input_ty.is_nil();
        let is_bool = input_ty.is_bool();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
                C_bool(bcx.ccx, match op {
                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                    _ => unreachable!()
                })
            } else if is_float {
                bcx.fcmp(
                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                    lhs, rhs
                )
            } else {
                let (lhs, rhs) = if is_bool {
                    // FIXME(#36856) -- extend the bools into `i8` because
                    // LLVM's i1 comparisons are broken.
                    (bcx.zext(lhs, Type::i8(bcx.ccx)),
                     bcx.zext(rhs, Type::i8(bcx.ccx)))
                } else {
                    (lhs, rhs)
                };

                bcx.icmp(
                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                    lhs, rhs
                )
            }
        }
    }
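
    /// Translate a comparison between two fat pointers by combining
    /// comparisons of the data pointers and the extra (length or vtable)
    /// components.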
    pub fn trans_fat_ptr_binop(&mut self,
                               bcx: &Builder<'a, 'tcx>,
                               op: mir::BinOp,
                               lhs_addr: ValueRef,
                               lhs_extra: ValueRef,
                               rhs_addr: ValueRef,
                               rhs_extra: ValueRef,
                               _input_ty: Ty<'tcx>)
                               -> ValueRef {
        match op {
            mir::BinOp::Eq => {
                bcx.and(
                    bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Ne => {
                bcx.or(
                    bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Le | mir::BinOp::Lt |
            mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
                    mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
                    mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
                    mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
                    _ => bug!(),
                };

                bcx.or(
                    bcx.icmp(strict_op, lhs_addr, rhs_addr),
                    bcx.and(
                        bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                        bcx.icmp(op, lhs_extra, rhs_extra)
                    )
                )
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }
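
    /// Translate a checked binary operation, returning an
    /// `OperandValue::Pair` of the result value and an overflow flag.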
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &Builder<'a, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx.check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx, false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx, of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
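            // Shifts overflow when the shift amount has bits set outside the
            // valid range for the left-hand type; detect this by masking the
            // amount and checking whether any masked-out bits remain.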
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}
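
/// Returns true if `rvalue` can be translated directly into an operand
/// (`trans_rvalue_operand`); `Repeat` and `Aggregate` rvalues must instead be
/// translated into a destination lvalue.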
pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Discriminant(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) =>
            false,
    }

    // (*) this is only true if the type is suitable
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

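/// Map an `OverflowOp` and an integer type to the matching
/// `llvm.*.with.overflow.*` intrinsic, normalizing `isize`/`usize` to the
/// target's pointer-width integer first.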
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyInt(I16),
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyUint(U16),
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",
            TyInt(I128) => "llvm.sadd.with.overflow.i128",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",
            TyUint(U128) => "llvm.uadd.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",
            TyInt(I128) => "llvm.ssub.with.overflow.i128",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",
            TyUint(U128) => "llvm.usub.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",
            TyInt(I128) => "llvm.smul.with.overflow.i128",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",
            TyUint(U128) => "llvm.umul.with.overflow.i128",

            _ => unreachable!(),
        },
    };

    bcx.ccx.get_intrinsic(&name)
}