// src/librustc_trans/mir/rvalue.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::mir::repr as mir;

use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef, get_dataptr};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
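    /// Translate `rvalue` and store the result into the destination lvalue
    /// `dest`, returning the (possibly new) block to continue translation in.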
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>,
                        debug_loc: DebugLoc)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through the stack
                // (fixable by translating constants into OperandValue::Ref;
                // it is not clear why we don't do that already).
                self.store_operand(&bcx, dest.llval, tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = bcx.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::Pair(..) => bug!(),
                        OperandValue::Immediate(llval) => {
                            // unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx(), size);
                let base = get_dataptr(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                bcx
            }

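            // Aggregate rvalues (ADT variants, tuples, closures and arrays)
            // are translated field by field directly into the destination.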
            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                        }
                    },
                    _ => {
                        // FIXME Shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use closure;

                            closure::trans_closure_body_via_mir(bcx.ccx(),
                                                                def_id,
                                                                bcx.monomorphize(&substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // in some cases the values here are arrays
                                // rather than structs.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
                               Lvalue::new("out"))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });

                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

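    /// Translate `rvalue` into a freshly created operand, without writing it
    /// into a destination lvalue. Only valid for rvalues for which
    /// `rvalue_creates_operand` returns true.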
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>,
                                debug_loc: DebugLoc)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
                "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // Unsizing to anything other than a fat pointer
                        // currently can't produce an operand.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // So we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let llval = operand.immediate();
                        let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            adt::is_discr_signed(&repr)
                        } else {
                            operand.ty.is_signed()
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Misc => { // Casts from a fat-ptr.
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // A cast from a fat pointer to a thin pointer extracts
                                // the data pointer and pointer-casts it to the desired
                                // pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       debug_loc)
                            })
                        }
                        _ => bug!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

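            // Box(T): allocate memory for the contents (using its computed
            // size and alignment) and return the raw pointer as an immediate
            // operand of type Box<T>.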
            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   debug_loc);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }

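    /// Translate a binary operation on scalar (immediate) values into the
    /// corresponding LLVM instruction, choosing the float, signed, or
    /// unsigned variant based on `input_ty`.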
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }

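    /// Translate a checked binary operation, returning an
    /// `OperandValue::Pair` of the result value and a boolean overflow flag.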
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx().check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
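                // A shift overflows when the shift amount is not smaller than
                // the bit width of the LHS type: mask off the valid
                // shift-amount bits and flag overflow if any other bits
                // remain set.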
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = bcx.with_block(|bcx| {
                    common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
                });
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}

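/// Returns true if the rvalue can be translated directly into an operand
/// (`trans_rvalue_operand`), rather than requiring a destination lvalue.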
pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
                                          _bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                          rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

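/// Look up the LLVM `*.with.overflow` intrinsic matching `oop` and the
/// integer type `ty`, with `isize`/`usize` normalized to the target's
/// pointer width.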
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",

            _ => unreachable!(),
        },
    };

    bcx.ccx().get_intrinsic(&name)
}