// src/librustc_trans/trans/mir/rvalue.rs (from rust.git)
// Snapshot taken at commit: "mir: Translate Rvalue::Slice without relying on tvec."
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

11 use llvm::ValueRef;
12 use rustc::middle::ty::{self, Ty};
13 use middle::ty::cast::{CastTy, IntTy};
14 use middle::const_eval::ConstVal;
15 use rustc_const_eval::ConstInt;
16 use rustc::mir::repr as mir;
17
18 use trans::asm;
19 use trans::base;
20 use trans::callee::Callee;
21 use trans::common::{self, C_uint, BlockAndBuilder, Result};
22 use trans::debuginfo::DebugLoc;
23 use trans::declare;
24 use trans::adt;
25 use trans::machine;
26 use trans::type_::Type;
27 use trans::type_of;
28 use trans::tvec;
29 use trans::value::Value;
30 use trans::Disr;
31
32 use super::MirContext;
33 use super::operand::{OperandRef, OperandValue};
34 use super::lvalue::{LvalueRef, get_dataptr, get_meta};
35
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    /// Translates `rvalue` and stores the result into the destination
    /// lvalue `dest` (which the caller has already allocated). Returns
    /// the block/builder in which code generation should continue.
    ///
    /// This is the general entry point for rvalues. Rvalues that can be
    /// represented as a plain value are handled by the catch-all arm at
    /// the bottom, which delegates to `trans_rvalue_operand` and then
    /// stores the resulting operand into `dest`.
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                // Plain use: copy the translated operand into the destination,
                // then mark the source operand as moved-from.
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
                self.store_operand(&bcx, dest.llval, tr_operand);
                self.set_operand_dropped(&bcx, operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bcx, source);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::FatPtr(..) => unreachable!(),
                        OperandValue::Immediate(llval) => {
                            // unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                self.set_operand_dropped(&bcx, source);
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
                // `[elem; count]`: translate the element once, then store a
                // copy of it into every slot of the destination array.
                let tr_elem = self.trans_operand(&bcx, elem);
                let count = ConstVal::Integral(ConstInt::Usize(count.value));
                let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate();
                let base = get_dataptr(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                self.set_operand_dropped(&bcx, elem);
                bcx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, index, _) => {
                        // ADT aggregate: set the discriminant for variant
                        // `index`, then fill in each (non-ZST) field.
                        let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
                        let disr = Disr::from(adt_def.variants[index].disr_val);
                        bcx.with_block(|bcx| {
                            // NOTE(review): `disr` is already a `Disr`, so the
                            // inner `Disr::from` below looks redundant — confirm.
                            adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
                                                                            val, disr, i);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                            self.set_operand_dropped(&bcx, operand);
                        }
                    },
                    _ => {
                        // FIXME Shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use rustc_front::hir;
                            use syntax::ast::DUMMY_NODE_ID;
                            use syntax::codemap::DUMMY_SP;
                            use syntax::ptr::P;
                            use trans::closure;

                            // Translate the closure with a dummy declaration and
                            // body, into an ignored destination, purely for the
                            // side effect of instantiating its LLVM function.
                            closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()),
                                                        &hir::FnDecl {
                                                            inputs: P::new(),
                                                            output: hir::NoReturn(DUMMY_SP),
                                                            variadic: false
                                                        },
                                                        &hir::Block {
                                                            stmts: P::new(),
                                                            expr: None,
                                                            id: DUMMY_NODE_ID,
                                                            rules: hir::DefaultBlock,
                                                            span: DUMMY_SP
                                                        },
                                                        DUMMY_NODE_ID, def_id,
                                                        &bcx.monomorphize(substs));
                        }

                        // Tuple/closure/array aggregates: store each operand at
                        // its positional index in the destination.
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // note that in some cases the values here will
                                // not be structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                            self.set_operand_dropped(&bcx, operand);
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::Slice { ref input, from_start, from_end } => {
                // Slicing: build a fat pointer (data pointer + length) into
                // `dest`, offsetting the base by `from_start` and shrinking
                // the length by `from_start + from_end`.
                let ccx = bcx.ccx();
                let input = self.trans_lvalue(&bcx, input);
                let ty = input.ty.to_ty(bcx.tcx());
                let (llbase1, lllen) = match ty.sty {
                    // Fixed-size array: the length is a compile-time constant.
                    ty::TyArray(_, n) => {
                        (bcx.gepi(input.llval, &[0, from_start]), C_uint(ccx, n))
                    }
                    // Slice/str: the length comes from the lvalue's extra data.
                    ty::TySlice(_) | ty::TyStr => {
                        (bcx.gepi(input.llval, &[from_start]), input.llextra)
                    }
                    _ => unreachable!("cannot slice {}", ty)
                };
                let adj = C_uint(ccx, from_start + from_end);
                let lllen1 = bcx.sub(lllen, adj);
                bcx.store(llbase1, get_dataptr(&bcx, dest.llval));
                bcx.store(lllen1, get_meta(&bcx, dest.llval));
                bcx
            }

            mir::Rvalue::InlineAsm(ref inline_asm) => {
                bcx.map_block(|bcx| {
                    asm::trans_inline_asm(bcx, inline_asm)
                })
            }

            _ => {
                // All remaining rvalue kinds produce a plain operand
                // (see `rvalue_creates_operand`); translate and store it.
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

    /// Translates `rvalue` into an `OperandRef` (a value that does not
    /// require a destination in memory). Only valid for rvalue kinds for
    /// which `rvalue_creates_operand` returns true; the remaining kinds
    /// trigger a compiler bug at the bottom of the match.
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        // Turn a `fn` item into an actual function pointer.
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                unreachable!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::FatPtr(..) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // and is a no-op at the LLVM level
                                self.set_operand_dropped(&bcx, source);
                                operand.val
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                self.set_operand_dropped(&bcx, source);
                                OperandValue::FatPtr(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bcx.sess().bug(
                                    &format!("by-ref operand {:?} in trans_rvalue_operand",
                                             operand));
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
                        // Scalar-to-scalar cast: pick the LLVM conversion
                        // instruction from the (input, output) cast classes.
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let llval = operand.immediate();
                        // C-like enums take the signedness of their
                        // discriminant representation.
                        let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            adt::is_discr_signed(&repr)
                        } else {
                            operand.ty.is_signed()
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                // Integer resize: bitcast if same width,
                                // truncate if narrowing, sign-/zero-extend
                                // if widening.
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bcx.ccx().sess().bug(
                                &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                            )
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Misc => { // Casts from a fat-ptr.
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                // Fat-ptr to fat-ptr: cast the data pointer and
                                // keep the metadata, whose LLVM kind must match.
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::FatPtr(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            panic!("Unexpected non-FatPtr operand")
                        }
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                // NOTE(review): the reference type is built with `ReStatic`
                // regardless of the actual region — presumably regions are
                // irrelevant at trans time; confirm.
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReStatic),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    // Unsized referent: carry the extra data (stored in
                    // `llextra`) along in a fat pointer.
                    OperandRef {
                        val: OperandValue::FatPtr(tr_lvalue.llval,
                                                  tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    // Fat pointers only support comparison here; both sides
                    // must themselves be fat pointers.
                    match (lhs.val, rhs.val) {
                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       DebugLoc::None)
                            })
                        }
                        _ => unreachable!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                // `box EXPR`: allocate heap memory sized/aligned for
                // `content_ty` and yield the resulting pointer; the content
                // itself is written by other MIR statements.
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   DebugLoc::None);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(..) |
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::Slice { .. } |
            mir::Rvalue::InlineAsm(..) => {
                // These kinds must be translated through `trans_rvalue` into
                // a destination; reaching here is a compiler bug (and the
                // assert at the top should already have fired).
                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
            }
        }
    }

    /// Translates a binary operation on two immediate (scalar) values of
    /// type `input_ty`, selecting among the float, signed-integer and
    /// unsigned-integer instruction variants as appropriate.
    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                // LLVM currently always lowers the `frem` instructions appropriate
                // library calls typically found in libm. Notably f64 gets wired up
                // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
                // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
                // instead just an inline function in a header that goes up to a
                // f64, uses `fmod`, and then comes back down to a f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
                // still unconditionally lower frem instructions over 32-bit floats
                // to a call to `fmodf`. To work around this we special case MSVC
                // 32-bit float rem instructions and instead do the call out to
                // `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps unify
                // these two implementations one day! Also note that we call `fmod`
                // for both 32 and 64-bit floats because if we emit any FRem
                // instruction at all then LLVM is capable of optimizing it into a
                // 32-bit FRem (which we're trying to avoid).
                let tcx = bcx.tcx();
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                    tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
                    if input_ty == tcx.types.f32 {
                        // f32 % f32: widen to f64, call fmod, narrow back.
                        let lllhs = bcx.fpext(lhs, f64t);
                        let llrhs = bcx.fpext(rhs, f64t);
                        let llres = bcx.call(llfn, &[lllhs, llrhs], None);
                        bcx.fptrunc(llres, Type::f32(bcx.ccx()))
                    } else {
                        bcx.call(llfn, &[lhs, rhs], None)
                    }
                } else {
                    bcx.frem(lhs, rhs)
                }
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                bcx.with_block(|bcx| {
                    base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                               op.to_hir_binop(), DebugLoc::None)
                })
            }
        }
    }
}
585
586 pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
587     match *rvalue {
588         mir::Rvalue::Ref(..) |
589         mir::Rvalue::Len(..) |
590         mir::Rvalue::Cast(..) | // (*)
591         mir::Rvalue::BinaryOp(..) |
592         mir::Rvalue::UnaryOp(..) |
593         mir::Rvalue::Box(..) =>
594             true,
595         mir::Rvalue::Use(..) | // (**)
596         mir::Rvalue::Repeat(..) |
597         mir::Rvalue::Aggregate(..) |
598         mir::Rvalue::Slice { .. } |
599         mir::Rvalue::InlineAsm(..) =>
600             false,
601     }
602
603     // (*) this is only true if the type is suitable
604     // (**) we need to zero-out the source operand after moving, so we are restricted to either
605     // ensuring all users of `Use` zero it out themselves or not allowing to “create” operand for
606     // it.
607 }