]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/mir/rvalue.rs
Refactor: {Lvalue,Rvalue,Operand}::ty only need the locals' types, not the full &Mir
[rust.git] / src / librustc_trans / mir / rvalue.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::{self, ValueRef};
12 use rustc::ty::{self, Ty};
13 use rustc::ty::cast::{CastTy, IntTy};
14 use rustc::ty::layout::{Layout, LayoutTyper};
15 use rustc::mir::tcx::LvalueTy;
16 use rustc::mir;
17 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
18
19 use base;
20 use builder::Builder;
21 use callee;
22 use common::{self, val_ty, C_bool, C_null, C_uint};
23 use common::{C_integral};
24 use adt;
25 use machine;
26 use monomorphize;
27 use type_::Type;
28 use type_of;
29 use tvec;
30 use value::Value;
31
32 use super::MirContext;
33 use super::constant::const_scalar_checked_binop;
34 use super::operand::{OperandRef, OperandValue};
35 use super::lvalue::LvalueRef;
36
37 impl<'a, 'tcx> MirContext<'a, 'tcx> {
    /// Translates `rvalue` and stores its result into the memory slot `dest`.
    ///
    /// This is the "store into an lvalue" path, used when the destination is
    /// already-allocated memory; rvalues that can instead produce an
    /// immediate/pair value are delegated to `trans_rvalue_operand` (see the
    /// catch-all arm). Returns the builder positioned after the emitted code.
    pub fn trans_rvalue(&mut self,
                        bcx: Builder<'a, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Builder<'a, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
                self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = self.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                let llref = match operand.val {
                    OperandValue::Pair(..) => bug!(),
                    OperandValue::Immediate(llval) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
                        base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
                        scratch
                    }
                    OperandValue::Ref(llref, align) => {
                        // Already in memory: coerce directly from there.
                        LvalueRef::new_sized_ty(llref, operand.ty, align)
                    }
                };
                base::coerce_unsized_into(&bcx, &llref, &dest);
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                // `[elem; count]`: emit a loop that stores the element into
                // each slot of the destination array.
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx, size);
                let base = base::get_dataptr(&bcx, dest.llval);
                tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
                    self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
                    bcx.br(loop_bb);
                })
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
                        // Set the discriminant first, then store each field
                        // of the chosen variant into place.
                        let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index)
                            .to_u128_unchecked() as u64;
                        let dest_ty = dest.ty.to_ty(bcx.tcx());
                        adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr);
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                // Downcast the destination to the variant so
                                // field projection picks the right layout.
                                let mut val = LvalueRef::new_sized(
                                    dest.llval, dest.ty, dest.alignment);
                                // `active_field_index` is Some for union-like
                                // aggregates where only one field is written.
                                let field_index = active_field_index.unwrap_or(i);
                                val.ty = LvalueTy::Downcast {
                                    adt_def: adt_def,
                                    substs: self.monomorphize(&substs),
                                    variant_index: variant_index,
                                };
                                let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
                                self.store_operand(&bcx, lldest_i, align.to_align(), op);
                            }
                        }
                    },
                    _ => {
                        // If this is a tuple or closure, we need to translate GEP indices.
                        let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
                        let get_memory_index = |i| {
                            if let Layout::Univariant { ref variant, .. } = *layout {
                                adt::struct_llfields_index(variant, i)
                            } else {
                                i
                            }
                        };
                        let alignment = dest.alignment;
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPs for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let i = get_memory_index(i);
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, alignment.to_align(), op);
                            }
                        }
                    }
                }
                bcx
            }

            _ => {
                // Everything else produces an operand; translate it and
                // store it into the destination.
                assert!(self.rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                bcx
            }
        }
    }
166
    /// Translates `rvalue` into an immediate (or pair) operand value, rather
    /// than storing it into memory.
    ///
    /// Only rvalues for which `rvalue_creates_operand` returns true may be
    /// passed here (asserted below); the others must go through
    /// `trans_rvalue` with an explicit destination. Returns the builder and
    /// the resulting operand.
    pub fn trans_rvalue_operand(&mut self,
                                bcx: Builder<'a, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
    {
        assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = self.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        // `fn item` -> `fn pointer`: materialize the actual
                        // function address for the (monomorphized) item.
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs) => {
                                OperandValue::Immediate(
                                    callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::ClosureFnPointer => {
                        // Non-capturing closure -> `fn pointer`: resolve the
                        // closure as a `FnOnce` instance and take its address.
                        match operand.ty.sty {
                            ty::TyClosure(def_id, substs) => {
                                let instance = monomorphize::resolve_closure(
                                    bcx.ccx.shared(), def_id, substs, ty::ClosureKind::FnOnce);
                                OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
                            }
                            _ => {
                                bug!("{} cannot be cast to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // So we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
                                    operand.ty, cast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
                        // Misc cast with a fat-pointer *source*: either
                        // fat-ptr -> fat-ptr (recast data, keep metadata) or
                        // fat-ptr -> thin-ptr (drop the metadata).
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                // The metadata component must have the same
                                // LLVM kind on both sides to be reusable as-is.
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        // Scalar casts: int<->int, float<->float, int<->float,
                        // and the various pointer/integer conversions.
                        debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
                        let llval = operand.immediate();
                        let l = bcx.ccx.layout_of(operand.ty);
                        let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
                            if max > min {
                                // We want `table[e as usize]` to not
                                // have bound checks, and this is the most
                                // convenient place to put the `assume`.

                                base::call_assume(&bcx, bcx.icmp(
                                    llvm::IntULE,
                                    llval,
                                    C_integral(common::val_ty(llval), max, false)
                                ));
                            }

                            signed
                        } else {
                            operand.ty.is_signed()
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                bcx.intcast(llval, ll_t_out, signed)
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().types.re_erased,
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if bcx.ccx.shared().type_is_sized(ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    // Unsized referent: the reference is a fat pointer
                    // carrying the extra (length/vtable) component too.
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                // Length of an array or slice, as a `usize` immediate.
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                // Fat-pointer operands (comparisons) need both components;
                // everything else is a scalar binop on immediates.
                let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            self.trans_fat_ptr_binop(&bcx, op,
                                                     lhs_addr, lhs_extra,
                                                     rhs_addr, rhs_extra,
                                                     lhs.ty)
                        }
                        _ => bug!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                // Result is a `(value, overflowed)` tuple, kept as a pair.
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Discriminant(ref lvalue) => {
                // Read the discriminant of an enum lvalue, cast to the
                // MIR-determined discriminant type.
                let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
                let discr_ty = rvalue.ty(&self.mir.local_decls, bcx.tcx());
                let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
                let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
                                                  discr_lvalue.alignment, Some(discr_type), true);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(discr),
                    ty: discr_ty
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                // `size_of::<T>()`: a compile-time constant `usize`.
                assert!(bcx.ccx.shared().type_is_sized(ty));
                let val = C_uint(bcx.ccx, bcx.ccx.size_of(ty));
                let tcx = bcx.tcx();
                (bcx, OperandRef {
                    val: OperandValue::Immediate(val),
                    ty: tcx.types.usize,
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                // `box expr`: call the exchange-malloc lang item to allocate
                // space for the content and yield the boxed pointer.
                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx, content_ty);
                let llsize = machine::llsize_of(bcx.ccx, llty);
                let align = bcx.ccx.align_of(content_ty);
                let llalign = C_uint(bcx.ccx, align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);

                // Allocate space:
                let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
                    }
                };
                let instance = ty::Instance::mono(bcx.tcx(), def_id);
                let r = callee::get_fn(bcx.ccx, instance);
                let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);

                let operand = OperandRef {
                    val: OperandValue::Immediate(val),
                    ty: box_ty,
                };
                (bcx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(&self.mir.local_decls, self.ccx.tcx());
                (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
            }
        }
    }
484
485     pub fn trans_scalar_binop(&mut self,
486                               bcx: &Builder<'a, 'tcx>,
487                               op: mir::BinOp,
488                               lhs: ValueRef,
489                               rhs: ValueRef,
490                               input_ty: Ty<'tcx>) -> ValueRef {
491         let is_float = input_ty.is_fp();
492         let is_signed = input_ty.is_signed();
493         let is_nil = input_ty.is_nil();
494         let is_bool = input_ty.is_bool();
495         match op {
496             mir::BinOp::Add => if is_float {
497                 bcx.fadd(lhs, rhs)
498             } else {
499                 bcx.add(lhs, rhs)
500             },
501             mir::BinOp::Sub => if is_float {
502                 bcx.fsub(lhs, rhs)
503             } else {
504                 bcx.sub(lhs, rhs)
505             },
506             mir::BinOp::Mul => if is_float {
507                 bcx.fmul(lhs, rhs)
508             } else {
509                 bcx.mul(lhs, rhs)
510             },
511             mir::BinOp::Div => if is_float {
512                 bcx.fdiv(lhs, rhs)
513             } else if is_signed {
514                 bcx.sdiv(lhs, rhs)
515             } else {
516                 bcx.udiv(lhs, rhs)
517             },
518             mir::BinOp::Rem => if is_float {
519                 bcx.frem(lhs, rhs)
520             } else if is_signed {
521                 bcx.srem(lhs, rhs)
522             } else {
523                 bcx.urem(lhs, rhs)
524             },
525             mir::BinOp::BitOr => bcx.or(lhs, rhs),
526             mir::BinOp::BitAnd => bcx.and(lhs, rhs),
527             mir::BinOp::BitXor => bcx.xor(lhs, rhs),
528             mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]),
529             mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
530             mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
531             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
532             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
533                 C_bool(bcx.ccx, match op {
534                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
535                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
536                     _ => unreachable!()
537                 })
538             } else if is_float {
539                 bcx.fcmp(
540                     base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
541                     lhs, rhs
542                 )
543             } else {
544                 let (lhs, rhs) = if is_bool {
545                     // FIXME(#36856) -- extend the bools into `i8` because
546                     // LLVM's i1 comparisons are broken.
547                     (bcx.zext(lhs, Type::i8(bcx.ccx)),
548                      bcx.zext(rhs, Type::i8(bcx.ccx)))
549                 } else {
550                     (lhs, rhs)
551                 };
552
553                 bcx.icmp(
554                     base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
555                     lhs, rhs
556                 )
557             }
558         }
559     }
560
561     pub fn trans_fat_ptr_binop(&mut self,
562                                bcx: &Builder<'a, 'tcx>,
563                                op: mir::BinOp,
564                                lhs_addr: ValueRef,
565                                lhs_extra: ValueRef,
566                                rhs_addr: ValueRef,
567                                rhs_extra: ValueRef,
568                                _input_ty: Ty<'tcx>)
569                                -> ValueRef {
570         match op {
571             mir::BinOp::Eq => {
572                 bcx.and(
573                     bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
574                     bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
575                 )
576             }
577             mir::BinOp::Ne => {
578                 bcx.or(
579                     bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
580                     bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
581                 )
582             }
583             mir::BinOp::Le | mir::BinOp::Lt |
584             mir::BinOp::Ge | mir::BinOp::Gt => {
585                 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
586                 let (op, strict_op) = match op {
587                     mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
588                     mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
589                     mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
590                     mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
591                     _ => bug!(),
592                 };
593
594                 bcx.or(
595                     bcx.icmp(strict_op, lhs_addr, rhs_addr),
596                     bcx.and(
597                         bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
598                         bcx.icmp(op, lhs_extra, rhs_extra)
599                     )
600                 )
601             }
602             _ => {
603                 bug!("unexpected fat ptr binop");
604             }
605         }
606     }
607
    /// Lowers an overflow-checked scalar binary operation, producing an
    /// `OperandValue::Pair(result, overflowed)` where the second component
    /// is an `i1` flag that is true iff the operation overflowed.
    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &Builder<'a, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx.check_overflow() {
            // Overflow checks disabled: emit the plain op and a constant
            // `false` overflow flag.
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx, false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // will always panic at runtime, and produce a warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx, of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                // `llvm.*.with.overflow.*` returns a {result, i1} pair.
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                // Shifts overflow when the shift amount has bits set
                // outside the valid range for the LHS width; check that
                // explicitly, then emit the (masked) shift itself.
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
                let outer_bits = bcx.and(rhs, invert_mask);

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
664
665     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
666         match *rvalue {
667             mir::Rvalue::Ref(..) |
668             mir::Rvalue::Len(..) |
669             mir::Rvalue::Cast(..) | // (*)
670             mir::Rvalue::BinaryOp(..) |
671             mir::Rvalue::CheckedBinaryOp(..) |
672             mir::Rvalue::UnaryOp(..) |
673             mir::Rvalue::Discriminant(..) |
674             mir::Rvalue::NullaryOp(..) |
675             mir::Rvalue::Use(..) => // (*)
676                 true,
677             mir::Rvalue::Repeat(..) |
678             mir::Rvalue::Aggregate(..) => {
679                 let ty = rvalue.ty(&self.mir.local_decls, self.ccx.tcx());
680                 let ty = self.monomorphize(&ty);
681                 common::type_is_zero_size(self.ccx, ty)
682             }
683         }
684
685         // (*) this is only true if the type is suitable
686     }
687 }
688
/// Arithmetic operations that have a checked LLVM
/// `*.with.overflow` intrinsic counterpart.
#[derive(Copy, Clone)]
enum OverflowOp {
    /// Checked addition.
    Add,
    /// Checked subtraction.
    Sub,
    /// Checked multiplication.
    Mul,
}
693
694 fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
695     use syntax::ast::IntTy::*;
696     use syntax::ast::UintTy::*;
697     use rustc::ty::{TyInt, TyUint};
698
699     let tcx = bcx.tcx();
700
701     let new_sty = match ty.sty {
702         TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
703             "16" => TyInt(I16),
704             "32" => TyInt(I32),
705             "64" => TyInt(I64),
706             _ => panic!("unsupported target word size")
707         },
708         TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
709             "16" => TyUint(U16),
710             "32" => TyUint(U32),
711             "64" => TyUint(U64),
712             _ => panic!("unsupported target word size")
713         },
714         ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
715         _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
716     };
717
718     let name = match oop {
719         OverflowOp::Add => match new_sty {
720             TyInt(I8) => "llvm.sadd.with.overflow.i8",
721             TyInt(I16) => "llvm.sadd.with.overflow.i16",
722             TyInt(I32) => "llvm.sadd.with.overflow.i32",
723             TyInt(I64) => "llvm.sadd.with.overflow.i64",
724             TyInt(I128) => "llvm.sadd.with.overflow.i128",
725
726             TyUint(U8) => "llvm.uadd.with.overflow.i8",
727             TyUint(U16) => "llvm.uadd.with.overflow.i16",
728             TyUint(U32) => "llvm.uadd.with.overflow.i32",
729             TyUint(U64) => "llvm.uadd.with.overflow.i64",
730             TyUint(U128) => "llvm.uadd.with.overflow.i128",
731
732             _ => unreachable!(),
733         },
734         OverflowOp::Sub => match new_sty {
735             TyInt(I8) => "llvm.ssub.with.overflow.i8",
736             TyInt(I16) => "llvm.ssub.with.overflow.i16",
737             TyInt(I32) => "llvm.ssub.with.overflow.i32",
738             TyInt(I64) => "llvm.ssub.with.overflow.i64",
739             TyInt(I128) => "llvm.ssub.with.overflow.i128",
740
741             TyUint(U8) => "llvm.usub.with.overflow.i8",
742             TyUint(U16) => "llvm.usub.with.overflow.i16",
743             TyUint(U32) => "llvm.usub.with.overflow.i32",
744             TyUint(U64) => "llvm.usub.with.overflow.i64",
745             TyUint(U128) => "llvm.usub.with.overflow.i128",
746
747             _ => unreachable!(),
748         },
749         OverflowOp::Mul => match new_sty {
750             TyInt(I8) => "llvm.smul.with.overflow.i8",
751             TyInt(I16) => "llvm.smul.with.overflow.i16",
752             TyInt(I32) => "llvm.smul.with.overflow.i32",
753             TyInt(I64) => "llvm.smul.with.overflow.i64",
754             TyInt(I128) => "llvm.smul.with.overflow.i128",
755
756             TyUint(U8) => "llvm.umul.with.overflow.i8",
757             TyUint(U16) => "llvm.umul.with.overflow.i16",
758             TyUint(U32) => "llvm.umul.with.overflow.i32",
759             TyUint(U64) => "llvm.umul.with.overflow.i64",
760             TyUint(U128) => "llvm.umul.with.overflow.i128",
761
762             _ => unreachable!(),
763         },
764     };
765
766     bcx.ccx.get_intrinsic(&name)
767 }