1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::ty::{self, Ty};
13 use rustc::ty::cast::{CastTy, IntTy};
14 use rustc::ty::layout::{Layout, LayoutTyper};
15 use rustc::mir::tcx::LvalueTy;
17 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
22 use common::{self, val_ty, C_bool, C_null, C_uint};
23 use common::{C_integral};
32 use super::MirContext;
33 use super::constant::const_scalar_checked_binop;
34 use super::operand::{OperandRef, OperandValue};
35 use super::lvalue::LvalueRef;
// NOTE(review): this listing is an elided excerpt — the embedded per-line
// numbers jump, so closing braces and several match arms/statements are not
// visible here. Comments below describe only what the visible lines show.
37 impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Translate `rvalue` into the destination slot `dest`. Handles the rvalue
// kinds that want a memory destination (Use-copy, Unsize coercions of
// by-value structs, Repeat fills, Aggregate construction); the final arm
// (visible at the `assert!` below) funnels operand-like rvalues through
// `trans_rvalue_operand` and stores the result.
38 pub fn trans_rvalue(&mut self,
39 bcx: Builder<'a, 'tcx>,
40 dest: LvalueRef<'tcx>,
41 rvalue: &mir::Rvalue<'tcx>)
44 debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
45 Value(dest.llval), rvalue);
// Plain `Use`: translate the operand and copy it into `dest`.
48 mir::Rvalue::Use(ref operand) => {
49 let tr_operand = self.trans_operand(&bcx, operand);
50 // FIXME: consider not copying constants through stack. (fixable by translating
51 // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
52 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
// Unsizing coercion with a memory destination.
56 mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
57 let cast_ty = self.monomorphize(&cast_ty);
59 if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
60 // into-coerce of a thin pointer to a fat pointer - just
61 // use the operand path.
62 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
63 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
67 // Unsize of a nontrivial struct. I would prefer for
68 // this to be eliminated by MIR translation, but
69 // `CoerceUnsized` can be passed by a where-clause,
70 // so the (generic) MIR may not be able to expand it.
71 let operand = self.trans_operand(&bcx, source);
72 let operand = operand.pack_if_pair(&bcx);
// Obtain a by-ref view of the source so `coerce_unsized_into`
// can do a field-wise copy into `dest`.
73 let llref = match operand.val {
// `pack_if_pair` above is expected to have eliminated Pairs,
// hence this arm is unreachable.
74 OperandValue::Pair(..) => bug!(),
75 OperandValue::Immediate(llval) => {
76 // unsize from an immediate structure. We don't
77 // really need a temporary alloca here, but
78 // avoiding it would require us to have
79 // `coerce_unsized_into` use extractvalue to
80 // index into the struct, and this case isn't
81 // important enough for it.
82 debug!("trans_rvalue: creating ugly alloca");
83 let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp")
84 base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
// Already in memory: wrap the pointer as an lvalue directly.
87 OperandValue::Ref(llref, align) => {
88 LvalueRef::new_sized_ty(llref, operand.ty, align)
91 base::coerce_unsized_into(&bcx, &llref, &dest);
// `[elem; count]` repeat expression: store the element once per slot.
95 mir::Rvalue::Repeat(ref elem, ref count) => {
96 let tr_elem = self.trans_operand(&bcx, elem);
97 let size = count.as_u64(bcx.tcx().sess.target.uint_type);
98 let size = C_uint(bcx.ccx, size);
99 let base = base::get_dataptr(&bcx, dest.llval);
// Emit a loop over the destination slots, storing `tr_elem` into each.
100 tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
101 self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
// Aggregate construction: set the discriminant (for ADTs), then store
// each field in place. (Other AggregateKind arms are elided from this view.)
106 mir::Rvalue::Aggregate(ref kind, ref operands) => {
108 mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
109 let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index)
110 .to_u128_unchecked() as u64;
111 let dest_ty = dest.ty.to_ty(bcx.tcx());
112 adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr);
113 for (i, operand) in operands.iter().enumerate() {
114 let op = self.trans_operand(&bcx, operand);
115 // Do not generate stores and GEPis for zero-sized fields.
116 if !common::type_is_zero_size(bcx.ccx, op.ty) {
117 let mut val = LvalueRef::new_sized(
118 dest.llval, dest.ty, dest.alignment);
// `active_field_index` is Some for union-like construction
// with a single active field — TODO confirm, the Some-origin
// is not visible in this excerpt.
119 let field_index = active_field_index.unwrap_or(i);
// Downcast to the target variant so field projection below
// resolves field offsets within that variant.
120 val.ty = LvalueTy::Downcast {
122 substs: self.monomorphize(&substs),
123 variant_index: variant_index,
125 let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
126 self.store_operand(&bcx, lldest_i, align.to_align(), op);
131 // If this is a tuple or closure, we need to translate GEP indices.
132 let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
// Map a source-order field index to the LLVM struct field index
// (univariant layouts may reorder/pad fields); the non-Univariant
// fallback path is elided from this view.
133 let get_memory_index = |i| {
134 if let Layout::Univariant { ref variant, .. } = *layout {
135 adt::struct_llfields_index(variant, i)
140 let alignment = dest.alignment;
141 for (i, operand) in operands.iter().enumerate() {
142 let op = self.trans_operand(&bcx, operand);
143 // Do not generate stores and GEPis for zero-sized fields.
144 if !common::type_is_zero_size(bcx.ccx, op.ty) {
145 // Note: perhaps this should be StructGep, but
146 // note that in some cases the values here will
147 // not be structs but arrays.
148 let i = get_memory_index(i);
149 let dest = bcx.gepi(dest.llval, &[0, i]);
150 self.store_operand(&bcx, dest, alignment.to_align(), op);
// Fallback: everything else must be expressible as an operand
// (checked by the assert), so build it and store into `dest`.
159 assert!(self.rvalue_creates_operand(rvalue));
160 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
161 self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
// Translate `rvalue` into an SSA-style `OperandRef` (no destination slot).
// Only valid for rvalues that `rvalue_creates_operand` accepts (asserted
// below). Returns the possibly-updated builder plus the operand.
// NOTE(review): many interior lines of this function are elided from this
// listing (the embedded line numbers jump); comments describe visible code only.
167 pub fn trans_rvalue_operand(&mut self,
168 bcx: Builder<'a, 'tcx>,
169 rvalue: &mir::Rvalue<'tcx>)
170 -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
172 assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
// Casts: dispatch on the MIR cast kind.
175 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
176 let operand = self.trans_operand(&bcx, source);
177 debug!("cast operand is {:?}", operand);
178 let cast_ty = self.monomorphize(&cast_ty);
180 let val = match *kind {
// `fn item -> fn pointer`: resolve the monomorphic instance and
// take its address.
181 mir::CastKind::ReifyFnPointer => {
182 match operand.ty.sty {
183 ty::TyFnDef(def_id, substs) => {
184 OperandValue::Immediate(
185 callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
188 bug!("{} cannot be reified to a fn ptr", operand.ty)
// Non-capturing closure -> fn pointer: resolve via the FnOnce shim.
192 mir::CastKind::ClosureFnPointer => {
193 match operand.ty.sty {
194 ty::TyClosure(def_id, substs) => {
195 let instance = monomorphize::resolve_closure(
196 bcx.ccx.shared(), def_id, substs, ty::ClosureKind::FnOnce);
197 OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
200 bug!("{} cannot be cast to a fn ptr", operand.ty)
204 mir::CastKind::UnsafeFnPointer => {
205 // this is a no-op at the LLVM level
// Unsizing that yields an operand: only thin/fat pointer results.
208 mir::CastKind::Unsize => {
209 // unsize targets other than to a fat pointer currently
210 // can't be operands.
211 assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));
214 OperandValue::Pair(lldata, llextra) => {
215 // unsize from a fat pointer - this is a
216 // "trait-object-to-supertrait" coercion, for
218 // &'a fmt::Debug+Send => &'a fmt::Debug,
219 // So we need to pointercast the base to ensure
220 // the types match up.
221 let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
222 let lldata = bcx.pointercast(lldata, llcast_ty);
223 OperandValue::Pair(lldata, llextra)
// Thin -> fat pointer: synthesize the (data, extra) pair.
225 OperandValue::Immediate(lldata) => {
227 let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
228 operand.ty, cast_ty);
229 OperandValue::Pair(lldata, llextra)
231 OperandValue::Ref(..) => {
232 bug!("by-ref operand {:?} in trans_rvalue_operand",
// Misc cast FROM a fat pointer: fat->fat keeps the metadata and
// pointer-casts the data half; fat->thin extracts the data half.
237 mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
238 let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
239 let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
240 if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
241 if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
242 let ll_cft = ll_cast_ty.field_types();
243 let ll_fft = ll_from_ty.field_types();
244 let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
// Metadata halves must have the same LLVM kind for the
// pair to be reused unchanged.
245 assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
246 OperandValue::Pair(data_cast, meta_ptr)
247 } else { // cast to thin-ptr
248 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
249 // pointer-cast of that pointer to desired pointer type.
250 let llval = bcx.pointercast(data_ptr, ll_cast_ty);
251 OperandValue::Immediate(llval)
254 bug!("Unexpected non-Pair operand")
// All remaining scalar casts: numeric, pointer<->int, enum-to-int, etc.
257 mir::CastKind::Misc => {
258 debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
259 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
260 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
261 let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
262 let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
263 let llval = operand.immediate();
264 let l = bcx.ccx.layout_of(operand.ty);
// For C-like enums, signedness comes from the layout; emit a
// range `assume` so later indexing can drop bounds checks.
265 let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
267 // We want `table[e as usize]` to not
268 // have bound checks, and this is the most
269 // convenient place to put the `assume`.
271 base::call_assume(&bcx, bcx.icmp(
274 C_integral(common::val_ty(llval), max, false)
280 operand.ty.is_signed()
// Select the LLVM cast instruction from the (in, out) CastTy pair.
283 let newval = match (r_t_in, r_t_out) {
284 (CastTy::Int(_), CastTy::Int(_)) => {
285 bcx.intcast(llval, ll_t_out, signed)
287 (CastTy::Float, CastTy::Float) => {
288 let srcsz = ll_t_in.float_width();
289 let dstsz = ll_t_out.float_width();
291 bcx.fpext(llval, ll_t_out)
292 } else if srcsz > dstsz {
293 bcx.fptrunc(llval, ll_t_out)
298 (CastTy::Ptr(_), CastTy::Ptr(_)) |
299 (CastTy::FnPtr, CastTy::Ptr(_)) |
300 (CastTy::RPtr(_), CastTy::Ptr(_)) =>
301 bcx.pointercast(llval, ll_t_out),
302 (CastTy::Ptr(_), CastTy::Int(_)) |
303 (CastTy::FnPtr, CastTy::Int(_)) =>
304 bcx.ptrtoint(llval, ll_t_out),
305 (CastTy::Int(_), CastTy::Ptr(_)) =>
306 bcx.inttoptr(llval, ll_t_out),
307 (CastTy::Int(_), CastTy::Float) if signed =>
308 bcx.sitofp(llval, ll_t_out),
309 (CastTy::Int(_), CastTy::Float) =>
310 bcx.uitofp(llval, ll_t_out),
311 (CastTy::Float, CastTy::Int(IntTy::I)) =>
312 bcx.fptosi(llval, ll_t_out),
313 (CastTy::Float, CastTy::Int(_)) =>
314 bcx.fptoui(llval, ll_t_out),
315 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
317 OperandValue::Immediate(newval)
320 let operand = OperandRef {
// Taking a reference: an lvalue is already a pointer, so the llval
// itself (plus metadata for unsized types) is the reference value.
327 mir::Rvalue::Ref(_, bk, ref lvalue) => {
328 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
330 let ty = tr_lvalue.ty.to_ty(bcx.tcx());
331 let ref_ty = bcx.tcx().mk_ref(
332 bcx.tcx().types.re_erased,
333 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
336 // Note: lvalues are indirect, so storing the `llval` into the
337 // destination effectively creates a reference.
338 let operand = if bcx.ccx.shared().type_is_sized(ty) {
340 val: OperandValue::Immediate(tr_lvalue.llval),
345 val: OperandValue::Pair(tr_lvalue.llval,
// `len` of an lvalue (slice/array length), always a usize.
353 mir::Rvalue::Len(ref lvalue) => {
354 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
355 let operand = OperandRef {
356 val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
357 ty: bcx.tcx().types.usize,
// Binary ops: fat-pointer comparisons get special handling; everything
// else goes through the scalar path.
362 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
363 let lhs = self.trans_operand(&bcx, lhs);
364 let rhs = self.trans_operand(&bcx, rhs);
365 let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
366 match (lhs.val, rhs.val) {
367 (OperandValue::Pair(lhs_addr, lhs_extra),
368 OperandValue::Pair(rhs_addr, rhs_extra)) => {
369 self.trans_fat_ptr_binop(&bcx, op,
378 self.trans_scalar_binop(&bcx, op,
379 lhs.immediate(), rhs.immediate(),
382 let operand = OperandRef {
383 val: OperandValue::Immediate(llresult),
384 ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
// Checked ops: result is the `(value, overflowed)` tuple that MIR's
// CheckedBinaryOp produces.
388 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
389 let lhs = self.trans_operand(&bcx, lhs);
390 let rhs = self.trans_operand(&bcx, rhs);
391 let result = self.trans_scalar_checked_binop(&bcx, op,
392 lhs.immediate(), rhs.immediate(),
394 let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
395 let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
396 let operand = OperandRef {
// Unary ops: `!` maps to LLVM `not`; negation (elided here) is split
// by float-ness via `is_float`.
404 mir::Rvalue::UnaryOp(op, ref operand) => {
405 let operand = self.trans_operand(&bcx, operand);
406 let lloperand = operand.immediate();
407 let is_float = operand.ty.is_fp();
408 let llval = match op {
409 mir::UnOp::Not => bcx.not(lloperand),
410 mir::UnOp::Neg => if is_float {
417 val: OperandValue::Immediate(llval),
// Read the discriminant of an enum lvalue as an immediate.
422 mir::Rvalue::Discriminant(ref lvalue) => {
423 let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
424 let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
425 let discr_ty = rvalue.ty(&self.mir.local_decls, bcx.tcx());
426 let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
427 let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
428 discr_lvalue.alignment, Some(discr_type), true);
430 val: OperandValue::Immediate(discr),
// `size_of::<T>()` folded to a constant (only valid for sized types).
435 mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
436 assert!(bcx.ccx.shared().type_is_sized(ty));
437 let val = C_uint(bcx.ccx, bcx.ccx.size_of(ty));
440 val: OperandValue::Immediate(val),
// `box expr`: call the exchange-malloc lang item with (size, align)
// and cast the returned pointer to the content type.
445 mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
446 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
447 let llty = type_of::type_of(bcx.ccx, content_ty);
448 let llsize = machine::llsize_of(bcx.ccx, llty);
449 let align = bcx.ccx.align_of(content_ty);
450 let llalign = C_uint(bcx.ccx, align);
451 let llty_ptr = llty.ptr_to();
452 let box_ty = bcx.tcx().mk_box(content_ty);
// Missing lang item is a fatal (unrecoverable) compile error.
455 let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
458 bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
461 let instance = ty::Instance::mono(bcx.tcx(), def_id);
462 let r = callee::get_fn(bcx.ccx, instance);
463 let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
465 let operand = OperandRef {
466 val: OperandValue::Immediate(val),
471 mir::Rvalue::Use(ref operand) => {
472 let operand = self.trans_operand(&bcx, operand);
475 mir::Rvalue::Repeat(..) |
476 mir::Rvalue::Aggregate(..) => {
477 // According to `rvalue_creates_operand`, only ZST
478 // aggregate rvalues are allowed to be operands.
479 let ty = rvalue.ty(&self.mir.local_decls, self.ccx.tcx());
480 (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
// Emit the LLVM instruction for a MIR binary operator on scalar
// (non-fat-pointer) operands. The choice of instruction is driven by the
// input type's properties: float vs. signed int vs. unsigned, plus special
// cases for nil (unit) and bool comparisons.
// NOTE(review): several arm bodies are elided from this listing; only the
// visible dispatch structure is documented here.
485 pub fn trans_scalar_binop(&mut self,
486 bcx: &Builder<'a, 'tcx>,
490 input_ty: Ty<'tcx>) -> ValueRef {
491 let is_float = input_ty.is_fp();
492 let is_signed = input_ty.is_signed();
493 let is_nil = input_ty.is_nil();
494 let is_bool = input_ty.is_bool();
// Arithmetic: each op picks the float or integer LLVM instruction
// (the non-float branches are elided in this view).
496 mir::BinOp::Add => if is_float {
501 mir::BinOp::Sub => if is_float {
506 mir::BinOp::Mul => if is_float {
// Div/Rem additionally distinguish signed from unsigned integers.
511 mir::BinOp::Div => if is_float {
513 } else if is_signed {
518 mir::BinOp::Rem => if is_float {
520 } else if is_signed {
525 mir::BinOp::BitOr => bcx.or(lhs, rhs),
526 mir::BinOp::BitAnd => bcx.and(lhs, rhs),
527 mir::BinOp::BitXor => bcx.xor(lhs, rhs),
// Pointer offset maps to an inbounds GEP.
528 mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]),
// Shifts mask the shift amount to avoid LLVM's UB on oversized shifts.
529 mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
530 mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
531 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
// Comparisons on `()` are compile-time constants: () == (), never <.
532 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
533 C_bool(bcx.ccx, match op {
534 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
535 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
// Float comparison path (elided): translate op to an fcmp predicate.
540 base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
544 let (lhs, rhs) = if is_bool {
545 // FIXME(#36856) -- extend the bools into `i8` because
546 // LLVM's i1 comparisons are broken.
547 (bcx.zext(lhs, Type::i8(bcx.ccx)),
548 bcx.zext(rhs, Type::i8(bcx.ccx)))
// Integer comparison: icmp with signedness-aware predicate.
554 base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
// Compare two fat pointers (data ptr + extra/metadata pairs). Only
// comparison operators are expected here — the trailing `bug!` fires for
// anything else. NOTE(review): the parameter list and the combining
// and/or logic between the paired icmps are elided from this listing.
561 pub fn trans_fat_ptr_binop(&mut self,
562 bcx: &Builder<'a, 'tcx>,
// Eq: both halves must be equal (combined with a visible-elsewhere `and`).
573 bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
574 bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
// Ne: either half differs.
579 bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
580 bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
583 mir::BinOp::Le | mir::BinOp::Lt |
584 mir::BinOp::Ge | mir::BinOp::Gt => {
585 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
// Unsigned predicates are used for the address words; the strict
// variant of <= / >= is < / > so the tie-break on the extra word
// only applies when the address halves are equal.
586 let (op, strict_op) = match op {
587 mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
588 mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
589 mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
590 mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
595 bcx.icmp(strict_op, lhs_addr, rhs_addr),
597 bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
598 bcx.icmp(op, lhs_extra, rhs_extra)
603 bug!("unexpected fat ptr binop");
// Emit an overflow-checked binary op, producing an OperandValue::Pair of
// (result, overflow-flag). Three strategies, in order:
//   1. overflow checks disabled -> plain op with a constant-false flag;
//   2. both operands constant -> fold at compile time (also lets the
//      compiler warn about always-panicking Asserts);
//   3. otherwise, LLVM `*.with.overflow` intrinsics for Add/Sub/Mul, or a
//      shift-amount mask check for Shl/Shr.
// NOTE(review): some parameter lines and arm tails are elided from this view.
608 pub fn trans_scalar_checked_binop(&mut self,
609 bcx: &Builder<'a, 'tcx>,
613 input_ty: Ty<'tcx>) -> OperandValue {
614 // This case can currently arise only from functions marked
615 // with #[rustc_inherit_overflow_checks] and inlined from
616 // another crate (mostly core::num generic/#[inline] fns),
617 // while the current crate doesn't use overflow checks.
618 if !bcx.ccx.check_overflow() {
619 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
620 return OperandValue::Pair(val, C_bool(bcx.ccx, false));
623 // First try performing the operation on constants, which
624 // will only succeed if both operands are constant.
625 // This is necessary to determine when an overflow Assert
626 // will always panic at runtime, and produce a warning.
627 if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
628 return OperandValue::Pair(val, C_bool(bcx.ccx, of));
631 let (val, of) = match op {
632 // These are checked using intrinsics
633 mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
635 mir::BinOp::Add => OverflowOp::Add,
636 mir::BinOp::Sub => OverflowOp::Sub,
637 mir::BinOp::Mul => OverflowOp::Mul,
// The intrinsic returns an aggregate {result, overflow-bit};
// extract both halves.
640 let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
641 let res = bcx.call(intrinsic, &[lhs, rhs], None);
643 (bcx.extract_value(res, 0),
644 bcx.extract_value(res, 1))
646 mir::BinOp::Shl | mir::BinOp::Shr => {
647 let lhs_llty = val_ty(lhs);
648 let rhs_llty = val_ty(rhs);
// A shift overflows iff any bit of the amount outside the
// legal range is set: mask with the inverted legal-bits mask
// and compare against zero.
649 let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
650 let outer_bits = bcx.and(rhs, invert_mask);
652 let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
// The value itself still uses the masked (unchecked) shift.
653 let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
658 bug!("Operator `{:?}` is not a checkable operator", op)
662 OperandValue::Pair(val, of)
// Predicate: can `rvalue` be translated as an SSA operand (no destination
// slot needed)? Repeat/Aggregate qualify only when the result type is
// zero-sized; the kinds marked (*) qualify subject to the caveat noted at
// the bottom.
665 pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
667 mir::Rvalue::Ref(..) |
668 mir::Rvalue::Len(..) |
669 mir::Rvalue::Cast(..) | // (*)
670 mir::Rvalue::BinaryOp(..) |
671 mir::Rvalue::CheckedBinaryOp(..) |
672 mir::Rvalue::UnaryOp(..) |
673 mir::Rvalue::Discriminant(..) |
674 mir::Rvalue::NullaryOp(..) |
675 mir::Rvalue::Use(..) => // (*)
677 mir::Rvalue::Repeat(..) |
678 mir::Rvalue::Aggregate(..) => {
// Monomorphize before the ZST check so generic aggregates are
// judged on their concrete instantiation.
679 let ty = rvalue.ty(&self.mir.local_decls, self.ccx.tcx());
680 let ty = self.monomorphize(&ty);
681 common::type_is_zero_size(self.ccx, ty)
685 // (*) this is only true if the type is suitable
689 #[derive(Copy, Clone)]
// Look up the LLVM `*.with.overflow` intrinsic matching the operation and
// the integer type. `isize`/`usize` are first normalized to the fixed-width
// type of the target's pointer width so the intrinsic name has a concrete
// bit width. Panics on non-integer types and unknown pointer widths.
// NOTE(review): some match tails/guards are elided from this listing.
694 fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
695 use syntax::ast::IntTy::*;
696 use syntax::ast::UintTy::*;
697 use rustc::ty::{TyInt, TyUint};
// Normalize isize/usize to I16/I32/I64-style fixed widths based on the
// target pointer width (the width arms themselves are elided here).
701 let new_sty = match ty.sty {
702 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
706 _ => panic!("unsupported target word size")
708 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
712 _ => panic!("unsupported target word size")
714 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
715 _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
// Signed ops use the `s*` intrinsics, unsigned the `u*` ones; the bit
// width is encoded in the name suffix.
718 let name = match oop {
719 OverflowOp::Add => match new_sty {
720 TyInt(I8) => "llvm.sadd.with.overflow.i8",
721 TyInt(I16) => "llvm.sadd.with.overflow.i16",
722 TyInt(I32) => "llvm.sadd.with.overflow.i32",
723 TyInt(I64) => "llvm.sadd.with.overflow.i64",
724 TyInt(I128) => "llvm.sadd.with.overflow.i128",
726 TyUint(U8) => "llvm.uadd.with.overflow.i8",
727 TyUint(U16) => "llvm.uadd.with.overflow.i16",
728 TyUint(U32) => "llvm.uadd.with.overflow.i32",
729 TyUint(U64) => "llvm.uadd.with.overflow.i64",
730 TyUint(U128) => "llvm.uadd.with.overflow.i128",
734 OverflowOp::Sub => match new_sty {
735 TyInt(I8) => "llvm.ssub.with.overflow.i8",
736 TyInt(I16) => "llvm.ssub.with.overflow.i16",
737 TyInt(I32) => "llvm.ssub.with.overflow.i32",
738 TyInt(I64) => "llvm.ssub.with.overflow.i64",
739 TyInt(I128) => "llvm.ssub.with.overflow.i128",
741 TyUint(U8) => "llvm.usub.with.overflow.i8",
742 TyUint(U16) => "llvm.usub.with.overflow.i16",
743 TyUint(U32) => "llvm.usub.with.overflow.i32",
744 TyUint(U64) => "llvm.usub.with.overflow.i64",
745 TyUint(U128) => "llvm.usub.with.overflow.i128",
749 OverflowOp::Mul => match new_sty {
750 TyInt(I8) => "llvm.smul.with.overflow.i8",
751 TyInt(I16) => "llvm.smul.with.overflow.i16",
752 TyInt(I32) => "llvm.smul.with.overflow.i32",
753 TyInt(I64) => "llvm.smul.with.overflow.i64",
754 TyInt(I128) => "llvm.smul.with.overflow.i128",
756 TyUint(U8) => "llvm.umul.with.overflow.i8",
757 TyUint(U16) => "llvm.umul.with.overflow.i16",
758 TyUint(U32) => "llvm.umul.with.overflow.i32",
759 TyUint(U64) => "llvm.umul.with.overflow.i64",
760 TyUint(U128) => "llvm.umul.with.overflow.i128",
// Resolve the name to the declared intrinsic ValueRef via the context.
766 bcx.ccx.get_intrinsic(&name)