// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::Layout;
use rustc::mir::tcx::LvalueTy;
use rustc::mir;
use middle::lang_items::ExchangeMallocFnLangItem;

use asm;
use base;
use builder::Builder;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint};
use common::{C_integral};
use adt;
use machine;
use type_::Type;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef};
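
// Translation of MIR rvalues. `trans_rvalue` evaluates an rvalue into a given
// destination lvalue, while `trans_rvalue_operand` builds an `OperandRef` for
// rvalues whose values can stay in SSA form; `rvalue_creates_operand` at the
// bottom of this file decides which path applies.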
impl<'a, 'tcx> MirContext<'a, 'tcx> {
    pub fn trans_rvalue(&mut self,
                        bcx: Builder<'a, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Builder<'a, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
                self.store_operand(&bcx, dest.llval, tr_operand, None);
                bcx
            }
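
            // An unsizing coercion, e.g. `&[u8; 4]` to `&[u8]` in
            // `let bytes: &[u8] = &[0u8; 4];`. The thin-to-fat-pointer case
            // goes through the operand path; unsizing a nontrivial struct
            // copies through `coerce_unsized_into` below.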
            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = self.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(&bcx, dest.llval, temp, None);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                let llref = match operand.val {
                    OperandValue::Pair(..) => bug!(),
                    OperandValue::Immediate(llval) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp");
                        base::store_ty(&bcx, llval, lltemp, operand.ty);
                        lltemp
                    }
                    OperandValue::Ref(llref) => llref
                };
                base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty);
                bcx
            }
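
            // A repeat expression such as `[0u8; 32]`: translate the element
            // once, then store it into each slot of the destination array.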
            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx, size);
                let base = base::get_dataptr(&bcx, dest.llval);
                tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
                    self.store_operand(bcx, llslot, tr_elem, None);
                })
            }
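
            // Aggregate literals: tuples, arrays, closures, and ADT variants,
            // e.g. `(a, b)` or `Some(x)`. Each field is translated and stored
            // into the destination individually.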
            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
                        let disr = Disr::from(adt_def.variants[variant_index].disr_val);
                        let dest_ty = dest.ty.to_ty(bcx.tcx());
                        adt::trans_set_discr(&bcx, dest_ty, dest.llval, disr);
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                let mut val = LvalueRef::new_sized(dest.llval, dest.ty);
                                let field_index = active_field_index.unwrap_or(i);
                                val.ty = LvalueTy::Downcast {
                                    adt_def: adt_def,
                                    substs: self.monomorphize(&substs),
                                    variant_index: disr.0 as usize,
                                };
                                let lldest_i = val.trans_field_ptr(&bcx, field_index);
                                self.store_operand(&bcx, lldest_i, op, None);
                            }
                        }
                    },
                    _ => {
                        // If this is a tuple or closure, we need to translate GEP indices.
                        let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
                        let translation = if let Layout::Univariant { ref variant, .. } = *layout {
                            Some(&variant.memory_index)
                        } else {
                            None
                        };
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let i = if let Some(ref t) = translation {
                                    t[i] as usize
                                } else {
                                    i
                                };
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op, None);
                            }
                        }
                    }
                }
                bcx
            }
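
            // `asm!` blocks: outputs are lvalues written in place, inputs are
            // immediate operands; constraint handling lives in
            // `asm::trans_inline_asm`.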
            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                asm::trans_inline_asm(&bcx, asm, outputs, input_vals);
                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(&bcx, dest.llval, temp, None);
                bcx
            }
        }
    }
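
    /// Translates an rvalue accepted by `rvalue_creates_operand` directly
    /// into an `OperandRef`, without spilling it to a stack slot.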
    pub fn trans_rvalue_operand(&mut self,
                                bcx: Builder<'a, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = self.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx, def_id, substs)
                                        .reify(bcx.ccx))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug
                                // So we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
                                                                              operand.ty, cast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let llval = operand.immediate();
                        let l = bcx.ccx.layout_of(operand.ty);
                        let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
                            if max > min {
                                // We want `table[e as usize]` to not
                                // have bound checks, and this is the most
                                // convenient place to put the `assume`.

                                base::call_assume(&bcx, bcx.icmp(
                                    llvm::IntULE,
                                    llval,
                                    C_integral(common::val_ty(llval), max, false)
                                ));
                            }

                            signed
                        } else {
                            operand.ty.is_signed()
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if bcx.ccx.shared().type_is_sized(ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }
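
            // Binary operations such as `a + b` or `p == q`. Fat pointers
            // (which carry an extra word) and plain scalars take separate
            // helpers below.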
            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            self.trans_fat_ptr_binop(&bcx, op,
                                                     lhs_addr, lhs_extra,
                                                     rhs_addr, rhs_extra,
                                                     lhs.ty)
                        }
                        _ => bug!()
                    }
                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
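
            // Overflow-checked arithmetic: with overflow checks enabled,
            // `a + b` compiles to a `CheckedBinaryOp` yielding the tuple
            // `(result, overflowed)`, which a MIR `Assert` then inspects.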
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }
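
            // `box EXPR`: compute the size and alignment of the boxed type,
            // then obtain memory by calling the `exchange_malloc` lang item.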
            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx, content_ty);
                let llsize = machine::llsize_of(bcx.ccx, llty);
                let align = type_of::align_of(bcx.ccx, content_ty);
                let llalign = C_uint(bcx.ccx, align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);

                // Allocate space:
                let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
                    }
                };
                let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
                    .reify(bcx.ccx);
                let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);

                let operand = OperandRef {
                    val: OperandValue::Immediate(val),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }
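
    /// Translates a binary operation whose operands are scalar immediates;
    /// `input_ty` is the type of the left-hand operand and selects between
    /// the float, signed, and unsigned variants of each LLVM instruction.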
    pub fn trans_scalar_binop(&mut self,
                              bcx: &Builder<'a, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        let is_nil = input_ty.is_nil();
        let is_bool = input_ty.is_bool();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
                C_bool(bcx.ccx, match op {
                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                    _ => unreachable!()
                })
            } else if is_float {
                bcx.fcmp(
                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                    lhs, rhs
                )
            } else {
                let (lhs, rhs) = if is_bool {
                    // FIXME(#36856) -- extend the bools into `i8` because
                    // LLVM's i1 comparisons are broken.
                    (bcx.zext(lhs, Type::i8(bcx.ccx)),
                     bcx.zext(rhs, Type::i8(bcx.ccx)))
                } else {
                    (lhs, rhs)
                };

                bcx.icmp(
                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                    lhs, rhs
                )
            }
        }
    }
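
    /// Compares two fat pointers (e.g. `*const [T]` or `*const Trait`) as
    /// (data, extra) pairs: equality componentwise, ordering lexicographic.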
    pub fn trans_fat_ptr_binop(&mut self,
                               bcx: &Builder<'a, 'tcx>,
                               op: mir::BinOp,
                               lhs_addr: ValueRef,
                               lhs_extra: ValueRef,
                               rhs_addr: ValueRef,
                               rhs_extra: ValueRef,
                               _input_ty: Ty<'tcx>)
                               -> ValueRef {
        match op {
            mir::BinOp::Eq => {
                bcx.and(
                    bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Ne => {
                bcx.or(
                    bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Le | mir::BinOp::Lt |
            mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
                    mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
                    mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
                    mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
                    _ => bug!(),
                };

                bcx.or(
                    bcx.icmp(strict_op, lhs_addr, rhs_addr),
                    bcx.and(
                        bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                        bcx.icmp(op, lhs_extra, rhs_extra)
                    )
                )
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }
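
    /// Like `trans_scalar_binop`, but additionally computes an overflow flag
    /// and returns both as an `OperandValue::Pair(result, overflowed)`.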
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &Builder<'a, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx.check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx, false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx, of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}
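
/// Whether the rvalue can be translated directly to an operand. Rvalues that
/// must build their result in memory (repeats, aggregates, inline asm) return
/// false and are only handled by `trans_rvalue` with an explicit destination.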
pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}
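
// Maps a checked operation and an integer type to the corresponding LLVM
// intrinsic, e.g. `OverflowOp::Add` on `u32` selects
// `llvm.uadd.with.overflow.i32`; `isize`/`usize` are first normalized to the
// target's pointer-width type.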
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyInt(I16),
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyUint(U16),
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",
            TyInt(I128) => "llvm.sadd.with.overflow.i128",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",
            TyUint(U128) => "llvm.uadd.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",
            TyInt(I128) => "llvm.ssub.with.overflow.i128",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",
            TyUint(U128) => "llvm.usub.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",
            TyInt(I128) => "llvm.smul.with.overflow.i128",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",
            TyUint(U128) => "llvm.umul.with.overflow.i128",

            _ => unreachable!(),
        },
    };

    bcx.ccx.get_intrinsic(&name)
}