// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::mir::repr as mir;

use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef, get_dataptr};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
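    /// Translate `rvalue` and store the result into the (indirect) destination
    /// lvalue `dest`, returning the (possibly new) block to continue in.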
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>,
                        debug_loc: DebugLoc)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through the stack
                // (fixable by translating constants into OperandValue::Ref;
                // it is unclear why we don't do that already).
                self.store_operand(&bcx, dest.llval, tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = bcx.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // An unsize coercion from a thin pointer to a fat
                    // pointer - just use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::Pair(..) => bug!(),
                        OperandValue::Immediate(llval) => {
                            // Unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                bcx
            }

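            // `[elem; count]`: translate the element once, then store a copy
            // into each slot of the destination array.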
            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx(), size);
                let base = get_dataptr(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                bcx
            }

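            // Aggregates (ADT variants, tuples, closures, arrays) are built by
            // storing each operand into the corresponding field of `dest`.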
            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                        }
                    },
                    _ => {
                        // FIXME: we shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use closure;

                            closure::trans_closure_body_via_mir(bcx.ccx(),
                                                                def_id,
                                                                bcx.monomorphize(&substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // in some cases the values here will not be
                                // structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                        }
                    }
                }
                bcx
            }

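            // Inline assembly: translate the output lvalues and input operands,
            // then emit the asm block writing directly into the outputs.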
            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });

                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

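    /// Translate an rvalue that can be represented as an `OperandRef`
    /// (see `rvalue_creates_operand`), without writing it into a destination.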
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>,
                                debug_loc: DebugLoc)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
                "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // Unsize targets other than fat pointers currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // so we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "Standard" unsize: thin pointer to fat pointer.
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // A cast of a fat pointer to a thin pointer extracts
                                // the data pointer and pointercasts it to the
                                // desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            let discr = match operand.val {
                                OperandValue::Immediate(llval) => llval,
                                OperandValue::Ref(llptr) => {
                                    bcx.with_block(|bcx| {
                                        adt::trans_get_discr(bcx, &repr, llptr, None, true)
                                    })
                                }
                                OperandValue::Pair(..) => bug!("Unexpected Pair operand")
                            };
                            (discr, adt::is_discr_signed(&repr))
                        } else {
                            (operand.immediate(), operand.ty.is_signed())
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

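            // Binary operations: fat-pointer comparisons need both the data
            // and the extra component, everything else is a scalar operation.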
            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       debug_loc)
                            })
                        }
                        _ => bug!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

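            // `box <expr>`: allocate enough memory for the content type on the
            // heap and return the raw pointer as the operand.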
            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   debug_loc);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);

            }
        }
    }

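    /// Translate a non-checked binary operation on scalar (immediate) operands,
    /// selecting the float/signed/unsigned LLVM instruction as appropriate.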
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }

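    /// Translate a checked binary operation, returning an `OperandValue::Pair`
    /// of the result value and an overflow flag.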
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx().check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = bcx.with_block(|bcx| {
                    common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
                });
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}

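/// Returns whether the given rvalue can be translated into an `OperandRef`
/// directly, i.e. without requiring a destination slot in memory.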
pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
                                          _bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                          rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

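/// Look up the LLVM `*.with.overflow` intrinsic for the given operation and
/// integer type, normalizing `isize`/`usize` to the target's pointer width.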
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",

            _ => unreachable!(),
        },
    };

    bcx.ccx().get_intrinsic(&name)
}