// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::mir::repr as mir;

use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use adt;
use machine;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef, get_dataptr};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
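    /// Translates `rvalue` into `dest`, an already-allocated lvalue, and
    /// returns the (possibly updated) block to continue in. Aggregates,
    /// repeats and inline asm are written piecewise into `dest` (e.g.,
    /// roughly, `dest = [x; n]` becomes a loop of per-element stores);
    /// every other rvalue is delegated to `trans_rvalue_operand` and the
    /// resulting operand is stored into `dest`.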
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>,
                        debug_loc: DebugLoc)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through the stack
                // (fixable by translating constants into OperandValue::Ref;
                // if we don't do that yet, why not?)
                self.store_operand(&bcx, dest.llval, tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = bcx.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::Pair(..) => bug!(),
                        OperandValue::Immediate(llval) => {
                            // unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx(), size);
                let base = get_dataptr(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                bcx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                        }
                    },
                    _ => {
                        // FIXME Shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use closure;

                            closure::trans_closure_body_via_mir(bcx.ccx(),
                                                                def_id,
                                                                bcx.monomorphize(&substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
                               Lvalue::new("out"))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });

                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

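    /// Translates `rvalue` directly into an `OperandRef` (an immediate or
    /// a pair) without going through a destination lvalue. Only the rvalue
    /// kinds for which `rvalue_creates_operand` returns true are accepted;
    /// casts, references, length reads, unary/binary ops and `Box` all
    /// produce a value in registers.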
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>,
                                debug_loc: DebugLoc)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
                "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // So we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            let discr = match operand.val {
                                OperandValue::Immediate(llval) => llval,
                                OperandValue::Ref(llptr) => {
                                    bcx.with_block(|bcx| {
                                        adt::trans_get_discr(bcx, &repr, llptr, None, true)
                                    })
                                }
                                OperandValue::Pair(..) => bug!("Unexpected Pair operand")
                            };
                            (discr, adt::is_discr_signed(&repr))
                        } else {
                            (operand.immediate(), operand.ty.is_signed())
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       debug_loc)
                            })
                        }
                        _ => bug!()
                    }
                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   debug_loc);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }

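    /// Translates an unchecked binary operation on scalar immediates,
    /// choosing the LLVM instruction from the operand type: for example,
    /// `Add` lowers to `fadd` for floats and `add` for integers, and `Div`
    /// lowers to `fdiv`, `sdiv` or `udiv` for float, signed and unsigned
    /// operands respectively.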
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }

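    /// Translates a checked binary operation, producing an
    /// `OperandValue::Pair(result, overflowed)`. Add/Sub/Mul go through the
    /// LLVM `*.with.overflow` intrinsics; Shl/Shr instead flag overflow when
    /// the shift amount has bits set outside the valid shift range for the
    /// left-hand type. If overflow checks are disabled for this crate, the
    /// overflow flag is simply a constant `false`.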
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx().check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = bcx.with_block(|bcx| {
                    common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
                });
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}

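/// Returns true if `rvalue` can be translated directly to an operand by
/// `trans_rvalue_operand` rather than requiring a destination lvalue.
/// Repeat, Aggregate and InlineAsm always need a destination; the remaining
/// kinds can be operands (for casts this additionally depends on the type
/// being suitable, see the (*) note below).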
pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
                                          _bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                          rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable
}

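/// The subset of binary operations that are checked via LLVM's
/// `*.with.overflow` intrinsics.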
#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

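/// Looks up the LLVM `*.with.overflow` intrinsic matching `oop` and the
/// integer type `ty`: for example, `Add` on `i32` maps to
/// `llvm.sadd.with.overflow.i32` and `Mul` on `u8` to
/// `llvm.umul.with.overflow.i8`. `isize`/`usize` are first resolved to the
/// target's pointer-width integer type.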
fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",

            _ => unreachable!(),
        },
    };

    bcx.ccx().get_intrinsic(&name)
}