]> git.lizzy.rs Git - rust.git/blob - src/librustc_trans/trans/mir/rvalue.rs
Rollup merge of #32147 - steveklabnik:gh31950, r=bluss
[rust.git] / src / librustc_trans / trans / mir / rvalue.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::ValueRef;
12 use rustc::middle::ty::{self, Ty};
13 use middle::ty::cast::{CastTy, IntTy};
14 use rustc::mir::repr as mir;
15
16 use trans::asm;
17 use trans::base;
18 use trans::callee::Callee;
19 use trans::common::{self, BlockAndBuilder, Result};
20 use trans::debuginfo::DebugLoc;
21 use trans::declare;
22 use trans::expr;
23 use trans::adt;
24 use trans::machine;
25 use trans::type_::Type;
26 use trans::type_of;
27 use trans::tvec;
28 use trans::Disr;
29
30 use super::MirContext;
31 use super::operand::{OperandRef, OperandValue};
32 use super::lvalue::LvalueRef;
33
34 impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
35     pub fn trans_rvalue(&mut self,
36                         bcx: BlockAndBuilder<'bcx, 'tcx>,
37                         dest: LvalueRef<'tcx>,
38                         rvalue: &mir::Rvalue<'tcx>)
39                         -> BlockAndBuilder<'bcx, 'tcx>
40     {
41         debug!("trans_rvalue(dest.llval={}, rvalue={:?})",
42                bcx.val_to_string(dest.llval),
43                rvalue);
44
45         match *rvalue {
46            mir::Rvalue::Use(ref operand) => {
47                let tr_operand = self.trans_operand(&bcx, operand);
48                // FIXME: consider not copying constants through stack. (fixable by translating
49                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
50                self.store_operand(&bcx, dest.llval, tr_operand);
51                self.set_operand_dropped(&bcx, operand);
52                bcx
53            }
54
55             mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
56                 if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
57                     // into-coerce of a thin pointer to a fat pointer - just
58                     // use the operand path.
59                     let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
60                     self.store_operand(&bcx, dest.llval, temp);
61                     return bcx;
62                 }
63
64                 // Unsize of a nontrivial struct. I would prefer for
65                 // this to be eliminated by MIR translation, but
66                 // `CoerceUnsized` can be passed by a where-clause,
67                 // so the (generic) MIR may not be able to expand it.
68                 let operand = self.trans_operand(&bcx, operand);
69                 bcx.with_block(|bcx| {
70                     match operand.val {
71                         OperandValue::FatPtr(..) => unreachable!(),
72                         OperandValue::Immediate(llval) => {
73                             // unsize from an immediate structure. We don't
74                             // really need a temporary alloca here, but
75                             // avoiding it would require us to have
76                             // `coerce_unsized_into` use extractvalue to
77                             // index into the struct, and this case isn't
78                             // important enough for it.
79                             debug!("trans_rvalue: creating ugly alloca");
80                             let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
81                             base::store_ty(bcx, llval, lltemp, operand.ty);
82                             base::coerce_unsized_into(bcx,
83                                                       lltemp, operand.ty,
84                                                       dest.llval, cast_ty);
85                         }
86                         OperandValue::Ref(llref) => {
87                             base::coerce_unsized_into(bcx,
88                                                       llref, operand.ty,
89                                                       dest.llval, cast_ty);
90                         }
91                     }
92                 });
93                 bcx
94             }
95
96             mir::Rvalue::Repeat(ref elem, ref count) => {
97                 let tr_elem = self.trans_operand(&bcx, elem);
98                 let size = self.trans_constval(&bcx, &count.value, count.ty).immediate();
99                 let bcx = bcx.map_block(|block| {
100                     let base = expr::get_dataptr(block, dest.llval);
101                     tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
102                         self.store_operand_direct(block, llslot, tr_elem);
103                         block
104                     })
105                 });
106                 self.set_operand_dropped(&bcx, elem);
107                 bcx
108             }
109
110             mir::Rvalue::Aggregate(ref kind, ref operands) => {
111                 match *kind {
112                     mir::AggregateKind::Adt(adt_def, index, _) => {
113                         let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx()));
114                         let disr = Disr::from(adt_def.variants[index].disr_val);
115                         bcx.with_block(|bcx| {
116                             adt::trans_set_discr(bcx, &repr, dest.llval, Disr::from(disr));
117                         });
118                         for (i, operand) in operands.iter().enumerate() {
119                             let op = self.trans_operand(&bcx, operand);
120                             // Do not generate stores and GEPis for zero-sized fields.
121                             if !common::type_is_zero_size(bcx.ccx(), op.ty) {
122                                 let val = adt::MaybeSizedValue::sized(dest.llval);
123                                 let lldest_i = bcx.with_block(|bcx| {
124                                     adt::trans_field_ptr(bcx, &repr, val, disr, i)
125                                 });
126                                 self.store_operand(&bcx, lldest_i, op);
127                                 self.set_operand_dropped(&bcx, operand);
128                             }
129                         }
130                     },
131                     _ => {
132                         for (i, operand) in operands.iter().enumerate() {
133                             let op = self.trans_operand(&bcx, operand);
134                             // Do not generate stores and GEPis for zero-sized fields.
135                             if !common::type_is_zero_size(bcx.ccx(), op.ty) {
136                                 // Note: perhaps this should be StructGep, but
137                                 // note that in some cases the values here will
138                                 // not be structs but arrays.
139                                 let dest = bcx.gepi(dest.llval, &[0, i]);
140                                 self.store_operand(&bcx, dest, op);
141                                 self.set_operand_dropped(&bcx, operand);
142                             }
143                         }
144                     }
145                 }
146                 bcx
147             }
148
149             mir::Rvalue::Slice { ref input, from_start, from_end } => {
150                 let ccx = bcx.ccx();
151                 let input = self.trans_lvalue(&bcx, input);
152                 let (llbase, lllen) = bcx.with_block(|bcx| {
153                     tvec::get_base_and_len(bcx,
154                                            input.llval,
155                                            input.ty.to_ty(bcx.tcx()))
156                 });
157                 let llbase1 = bcx.gepi(llbase, &[from_start]);
158                 let adj = common::C_uint(ccx, from_start + from_end);
159                 let lllen1 = bcx.sub(lllen, adj);
160                 let (lladdrdest, llmetadest) = bcx.with_block(|bcx| {
161                     (expr::get_dataptr(bcx, dest.llval), expr::get_meta(bcx, dest.llval))
162                 });
163                 bcx.store(llbase1, lladdrdest);
164                 bcx.store(lllen1, llmetadest);
165                 bcx
166             }
167
168             mir::Rvalue::InlineAsm(ref inline_asm) => {
169                 bcx.map_block(|bcx| {
170                     asm::trans_inline_asm(bcx, inline_asm)
171                 })
172             }
173
174             _ => {
175                 assert!(rvalue_creates_operand(rvalue));
176                 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
177                 self.store_operand(&bcx, dest.llval, temp);
178                 bcx
179             }
180         }
181     }
182
    /// Translates `rvalue` into a freestanding `OperandRef` (no destination
    /// lvalue required). Callers must first check `rvalue_creates_operand`;
    /// the memory-style rvalues (`Use`, `Repeat`, `Aggregate`, `Slice`,
    /// `InlineAsm`) are rejected with an ICE in the final arm.
    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
                let operand = self.trans_operand(&bcx, operand);
                debug!("cast operand is {}", operand.repr(&bcx));
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        // Turn a directly-known fn item into an actual
                        // function pointer value.
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs, operand.ty)
                                        .reify(bcx.ccx()).val)
                            }
                            _ => {
                                unreachable!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::FatPtr(..) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // and is a no-op at the LLVM level
                                operand.val
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize: thin pointer plus newly
                                // computed extra (length or vtable).
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::FatPtr(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bcx.sess().bug(
                                    &format!("by-ref operand {} in trans_rvalue_operand",
                                             operand.repr(&bcx)));
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => {
                        // Scalar-to-scalar cast (numeric, pointer, or C-like
                        // enum source).
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty);
                        // C-like enums first get lowered to their discriminant
                        // value; signedness then comes from the discr repr.
                        let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let repr = adt::represent_type(bcx.ccx(), operand.ty);
                            let llval = operand.immediate();
                            let discr = bcx.with_block(|bcx| {
                                adt::trans_get_discr(bcx, &repr, llval, None, true)
                            });
                            (discr, common::val_ty(discr), adt::is_discr_signed(&repr))
                        } else {
                            (operand.immediate(), ll_t_in, operand.ty.is_signed())
                        };

                        // Pick the LLVM conversion instruction from the
                        // (source category, destination category) pair.
                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    // same width: no instruction needed
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bcx.ccx().sess().bug(
                                &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                            )
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Misc => { // Casts from a fat-ptr.
                        let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                // fat-ptr to fat-ptr: cast the data pointer,
                                // keep the metadata (kinds must agree).
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::FatPtr(data_cast, meta_ptr)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            // NOTE(review): sibling arms report ICEs via
                            // `bcx.sess().bug(...)`; plain `panic!` here is
                            // inconsistent — consider unifying.
                            panic!("Unexpected non-FatPtr operand")
                        }
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReStatic),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    // Unsized referent: carry the extra (length/vtable) along.
                    OperandRef {
                        val: OperandValue::FatPtr(tr_lvalue.llval,
                                                  tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(self.lvalue_len(&bcx, tr_lvalue)),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                // Fat-pointer operands only occur for comparisons; scalars
                // go through `trans_scalar_binop`.
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
                            bcx.with_block(|bcx| {
                                base::compare_fat_ptrs(bcx,
                                                       lhs_addr, lhs_extra,
                                                       rhs_addr, rhs_extra,
                                                       lhs.ty, op.to_hir_binop(),
                                                       DebugLoc::None)
                            })
                        }
                        _ => unreachable!()
                    }

                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                // `box EXPR`: allocate enough memory for `content_ty` and
                // return the raw box pointer; the content is filled in by a
                // later store.
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = common::C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   DebugLoc::None);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(..) |
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::Slice { .. } |
            mir::Rvalue::InlineAsm(..) => {
                // Guarded against by the `rvalue_creates_operand` assert above.
                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
            }
        }
    }
453
454     pub fn trans_scalar_binop(&mut self,
455                               bcx: &BlockAndBuilder<'bcx, 'tcx>,
456                               op: mir::BinOp,
457                               lhs: ValueRef,
458                               rhs: ValueRef,
459                               input_ty: Ty<'tcx>) -> ValueRef {
460         let is_float = input_ty.is_fp();
461         let is_signed = input_ty.is_signed();
462         match op {
463             mir::BinOp::Add => if is_float {
464                 bcx.fadd(lhs, rhs)
465             } else {
466                 bcx.add(lhs, rhs)
467             },
468             mir::BinOp::Sub => if is_float {
469                 bcx.fsub(lhs, rhs)
470             } else {
471                 bcx.sub(lhs, rhs)
472             },
473             mir::BinOp::Mul => if is_float {
474                 bcx.fmul(lhs, rhs)
475             } else {
476                 bcx.mul(lhs, rhs)
477             },
478             mir::BinOp::Div => if is_float {
479                 bcx.fdiv(lhs, rhs)
480             } else if is_signed {
481                 bcx.sdiv(lhs, rhs)
482             } else {
483                 bcx.udiv(lhs, rhs)
484             },
485             mir::BinOp::Rem => if is_float {
486                 // LLVM currently always lowers the `frem` instructions appropriate
487                 // library calls typically found in libm. Notably f64 gets wired up
488                 // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
489                 // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
490                 // instead just an inline function in a header that goes up to a
491                 // f64, uses `fmod`, and then comes back down to a f32.
492                 //
493                 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
494                 // still unconditionally lower frem instructions over 32-bit floats
495                 // to a call to `fmodf`. To work around this we special case MSVC
496                 // 32-bit float rem instructions and instead do the call out to
497                 // `fmod` ourselves.
498                 //
499                 // Note that this is currently duplicated with src/libcore/ops.rs
500                 // which does the same thing, and it would be nice to perhaps unify
501                 // these two implementations one day! Also note that we call `fmod`
502                 // for both 32 and 64-bit floats because if we emit any FRem
503                 // instruction at all then LLVM is capable of optimizing it into a
504                 // 32-bit FRem (which we're trying to avoid).
505                 let tcx = bcx.tcx();
506                 let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
507                     tcx.sess.target.target.arch == "x86";
508                 if use_fmod {
509                     let f64t = Type::f64(bcx.ccx());
510                     let fty = Type::func(&[f64t, f64t], &f64t);
511                     let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
512                                                     tcx.types.f64);
513                     if input_ty == tcx.types.f32 {
514                         let lllhs = bcx.fpext(lhs, f64t);
515                         let llrhs = bcx.fpext(rhs, f64t);
516                         let llres = bcx.call(llfn, &[lllhs, llrhs], None, None);
517                         bcx.fptrunc(llres, Type::f32(bcx.ccx()))
518                     } else {
519                         bcx.call(llfn, &[lhs, rhs], None, None)
520                     }
521                 } else {
522                     bcx.frem(lhs, rhs)
523                 }
524             } else if is_signed {
525                 bcx.srem(lhs, rhs)
526             } else {
527                 bcx.urem(lhs, rhs)
528             },
529             mir::BinOp::BitOr => bcx.or(lhs, rhs),
530             mir::BinOp::BitAnd => bcx.and(lhs, rhs),
531             mir::BinOp::BitXor => bcx.xor(lhs, rhs),
532             mir::BinOp::Shl => {
533                 bcx.with_block(|bcx| {
534                     common::build_unchecked_lshift(bcx,
535                                                    lhs,
536                                                    rhs,
537                                                    DebugLoc::None)
538                 })
539             }
540             mir::BinOp::Shr => {
541                 bcx.with_block(|bcx| {
542                     common::build_unchecked_rshift(bcx,
543                                                    input_ty,
544                                                    lhs,
545                                                    rhs,
546                                                    DebugLoc::None)
547                 })
548             }
549             mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
550             mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
551                 bcx.with_block(|bcx| {
552                     base::compare_scalar_types(bcx, lhs, rhs, input_ty,
553                                                op.to_hir_binop(), DebugLoc::None)
554                 })
555             }
556         }
557     }
558 }
559
560 pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
561     match *rvalue {
562         mir::Rvalue::Ref(..) |
563         mir::Rvalue::Len(..) |
564         mir::Rvalue::Cast(..) | // (*)
565         mir::Rvalue::BinaryOp(..) |
566         mir::Rvalue::UnaryOp(..) |
567         mir::Rvalue::Box(..) =>
568             true,
569         mir::Rvalue::Use(..) | // (**)
570         mir::Rvalue::Repeat(..) |
571         mir::Rvalue::Aggregate(..) |
572         mir::Rvalue::Slice { .. } |
573         mir::Rvalue::InlineAsm(..) =>
574             false,
575     }
576
577     // (*) this is only true if the type is suitable
578     // (**) we need to zero-out the source operand after moving, so we are restricted to either
579     // ensuring all users of `Use` zero it out themselves or not allowing to “create” operand for
580     // it.
581 }