]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/mir/rvalue.rs
hir, mir: Separate HIR expressions / MIR operands from InlineAsm.
[rust.git] / src / librustc_trans / trans / mir / rvalue.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::ValueRef;
12 use rustc::middle::ty::{self, Ty};
13 use middle::ty::cast::{CastTy, IntTy};
14 use middle::const_eval::ConstVal;
15 use rustc_const_eval::ConstInt;
16 use rustc::mir::repr as mir;
17
18 use trans::asm;
19 use trans::base;
20 use trans::callee::Callee;
21 use trans::common::{self, C_uint, BlockAndBuilder, Result};
22 use trans::datum::{Datum, Lvalue};
23 use trans::debuginfo::DebugLoc;
24 use trans::declare;
25 use trans::adt;
26 use trans::machine;
27 use trans::type_::Type;
28 use trans::type_of;
29 use trans::tvec;
30 use trans::value::Value;
31 use trans::Disr;
32
33 use super::MirContext;
34 use super::operand::{OperandRef, OperandValue};
35 use super::lvalue::{LvalueRef, get_dataptr, get_meta};
36
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    /// Translates `rvalue` and stores the result into the memory slot `dest`.
    ///
    /// Returns the block to continue emitting into — several arms (e.g.
    /// `Repeat`, or anything routed through `trans_rvalue_operand`) may have
    /// moved translation to a different basic block.
    ///
    /// Rvalues that can be represented as immediate operands are handled by
    /// delegating to `trans_rvalue_operand` and storing the result; the
    /// remaining variants (`Use`, struct `Unsize`, `Repeat`, `Aggregate`,
    /// `Slice`, `InlineAsm`) write into `dest` directly.
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
                self.store_operand(&bcx, dest.llval, tr_operand);
                self.set_operand_dropped(&bcx, operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                bcx.with_block(|bcx| {
                    match operand.val {
                        // A fat-pointer source would have taken the operand
                        // path above, so it cannot reach here.
                        OperandValue::FatPtr(..) => unreachable!(),
                        OperandValue::Immediate(llval) => {
                            // unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                self.set_operand_dropped(&bcx, source);
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let tr_elem = self.trans_operand(&bcx, elem);
                // Re-wrap the repeat count as a usize constant so it can be
                // translated to an LLVM immediate for the fill loop below.
                let count = ConstVal::Integral(ConstInt::Usize(count.value));
                let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate();
                let base = get_dataptr(&bcx, dest.llval);
                // iter_vec_raw walks every element slot and stores a copy of
                // the translated element into each one.
                let bcx = bcx.map_block(|block| {
                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                self.set_operand_dropped(&bcx, elem);
                bcx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        // NOTE(review): `disr` is already a `Disr`, so the
                        // `Disr::from(disr)` below is an identity conversion.
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                            self.set_operand_dropped(&bcx, operand);
                        }
                    },
                    _ => {
                        // FIXME Shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use rustc_front::hir;
                            use syntax::ast::DUMMY_NODE_ID;
                            use syntax::codemap::DUMMY_SP;
                            use syntax::ptr::P;
                            use trans::closure;

                            // Instantiate the closure body with a dummy
                            // declaration/block; Dest::Ignore discards any
                            // value — only the side effect of translating the
                            // closure fn is wanted here.
                            closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()),
                                                        &hir::FnDecl {
                                                            inputs: P::new(),
                                                            output: hir::NoReturn(DUMMY_SP),
                                                            variadic: false
                                                        },
                                                        &hir::Block {
                                                            stmts: P::new(),
                                                            expr: None,
                                                            id: DUMMY_NODE_ID,
                                                            rules: hir::DefaultBlock,
                                                            span: DUMMY_SP
                                                        },
                                                        DUMMY_NODE_ID, def_id,
                                                        &bcx.monomorphize(substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                            self.set_operand_dropped(&bcx, operand);
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::Slice { ref input, from_start, from_end } => {
                let ccx = bcx.ccx();
                let input = self.trans_lvalue(&bcx, input);
                let ty = input.ty.to_ty(bcx.tcx());
                let (llbase1, lllen) = match ty.sty {
                    // Arrays: length is known statically (`n`).
                    ty::TyArray(_, n) => {
                        (bcx.gepi(input.llval, &[0, from_start]), C_uint(ccx, n))
                    }
                    // Slices/str: length travels alongside the pointer in
                    // `llextra`.
                    ty::TySlice(_) | ty::TyStr => {
                        (bcx.gepi(input.llval, &[from_start]), input.llextra)
                    }
                    _ => unreachable!("cannot slice {}", ty)
                };
                // New length = old length minus the elements trimmed from
                // both ends.
                let adj = C_uint(ccx, from_start + from_end);
                let lllen1 = bcx.sub(lllen, adj);
                bcx.store(llbase1, get_dataptr(&bcx, dest.llval));
                bcx.store(lllen1, get_meta(&bcx, dest.llval));
                bcx
            }

            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                // Outputs are lvalues the asm writes through; wrap each as a
                // Datum so the asm translator can store into it.
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
                               Lvalue::new("out"))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });

                for input in inputs {
                    self.set_operand_dropped(&bcx, input);
                }
                bcx
            }

            _ => {
                // Everything else must be expressible as an operand; build it
                // and store into the destination slot.
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

    /// Translates `rvalue` into an `OperandRef` (an SSA-style value rather
    /// than a memory store).
    ///
    /// Only valid for rvalues where `rvalue_creates_operand` returns true;
    /// asserts otherwise. Returns the (possibly new) block along with the
    /// operand.
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                // Turn the zero-sized fn item into an actual
                                // function pointer value.
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                unreachable!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::FatPtr(..) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // and is a no-op at the LLVM level
                                self.set_operand_dropped(&bcx, source);
                                operand.val
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                self.set_operand_dropped(&bcx, source);
                                OperandValue::FatPtr(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bcx.sess().bug(
                                    &format!("by-ref operand {:?} in trans_rvalue_operand",
                                             operand));
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let llval = operand.immediate();
                        // C-like enums use the signedness of their
                        // discriminant representation; everything else uses
                        // the type's own signedness.
                        let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            adt::is_discr_signed(&repr)
                        } else {
                            operand.ty.is_signed()
                        };

                        // Select the LLVM cast instruction from the
                        // (source, target) category pair.
                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bcx.ccx().sess().bug(
                                &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                            )
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Misc => { // Casts from a fat-ptr.
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                // fat-ptr to fat-ptr: recast the data pointer
                                // and reuse the metadata (the two metadata
                                // kinds must match, as asserted below).
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::FatPtr(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            panic!("Unexpected non-FatPtr operand")
                        }
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                // NOTE(review): the region is fixed to ReStatic here —
                // presumably regions are erased by this stage; confirm.
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReStatic),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::FatPtr(tr_lvalue.llval,
                                                  tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                // Fat-pointer comparisons need both components (data + extra)
                // and so take a dedicated path; everything else is a scalar
                // binop.
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       DebugLoc::None)
                            })
                        }
                        _ => unreachable!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    // Negation picks the float or integer instruction based
                    // on the operand type.
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                // malloc_raw_dyn allocates the box; the Option dance smuggles
                // the resulting pointer out of the map_block closure.
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   DebugLoc::None);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(..) |
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::Slice { .. } |
            mir::Rvalue::InlineAsm { .. } => {
                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
            }
        }
    }

    /// Emits the LLVM instruction for a scalar (non-fat-pointer) binary
    /// operation on immediates `lhs` and `rhs` of type `input_ty`.
    ///
    /// The instruction chosen depends on whether `input_ty` is a float and,
    /// for division/remainder/shifts/comparisons, whether it is signed.
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                // LLVM currently always lowers the `frem` instructions appropriate
                // library calls typically found in libm. Notably f64 gets wired up
                // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
                // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
                // instead just an inline function in a header that goes up to a
                // f64, uses `fmod`, and then comes back down to a f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
                // still unconditionally lower frem instructions over 32-bit floats
                // to a call to `fmodf`. To work around this we special case MSVC
                // 32-bit float rem instructions and instead do the call out to
                // `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps unify
                // these two implementations one day! Also note that we call `fmod`
                // for both 32 and 64-bit floats because if we emit any FRem
                // instruction at all then LLVM is capable of optimizing it into a
                // 32-bit FRem (which we're trying to avoid).
                let tcx = bcx.tcx();
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                    tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
                    if input_ty == tcx.types.f32 {
                        // Widen to f64, call fmod, narrow back to f32.
                        let lllhs = bcx.fpext(lhs, f64t);
                        let llrhs = bcx.fpext(rhs, f64t);
                        let llres = bcx.call(llfn, &[lllhs, llrhs], None);
                        bcx.fptrunc(llres, Type::f32(bcx.ccx()))
                    } else {
                        bcx.call(llfn, &[lhs, rhs], None)
                    }
                } else {
                    bcx.frem(lhs, rhs)
                }
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }
}
601
602 pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
603     match *rvalue {
604         mir::Rvalue::Ref(..) |
605         mir::Rvalue::Len(..) |
606         mir::Rvalue::Cast(..) | // (*)
607         mir::Rvalue::BinaryOp(..) |
608         mir::Rvalue::UnaryOp(..) |
609         mir::Rvalue::Box(..) =>
610             true,
611         mir::Rvalue::Use(..) | // (**)
612         mir::Rvalue::Repeat(..) |
613         mir::Rvalue::Aggregate(..) |
614         mir::Rvalue::Slice { .. } |
615         mir::Rvalue::InlineAsm { .. } =>
616             false,
617     }
618
619     // (*) this is only true if the type is suitable
620     // (**) we need to zero-out the source operand after moving, so we are restricted to either
621     // ensuring all users of `Use` zero it out themselves or not allowing to “create” operand for
622     // it.
623 }