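//! Codegen of MIR rvalues: each `mir::Rvalue` variant is translated either
//! directly into a destination place (`codegen_rvalue`) or into an
//! `OperandRef` (`codegen_rvalue_operand`).
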
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::meth::get_vtable;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::Size;

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
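    /// Codegens `rvalue` directly into the memory behind `dest`, returning the
    /// (possibly advanced) builder. Rvalues that produce operands fall through
    /// to `codegen_rvalue_operand` and the result is then stored into `dest`.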
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index as u64);
                            dest.project_index(&mut bx, llindex)
                        } else {
                            dest.project_field(&mut bx, field_index)
                        };
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

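    /// Codegens `rvalue` into the unsized place behind `indirect_dest`. Only
    /// `Rvalue::Use` can produce an unsized value; any other rvalue is a bug here.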
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

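    /// Codegens `rvalue` as an `OperandRef` rather than storing it into a place.
    /// Callers must first check `rvalue_creates_operand`; rvalues that require a
    /// destination in memory cannot be codegen'd this way.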
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .expect("failed to normalize and resolve closure during codegen")
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast to thin pointer: extract the data pointer and
                                // pointer-cast it to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::DynStar => {
                        let data = match operand.val {
                            OperandValue::Ref(_, _, _) => todo!(),
                            OperandValue::Immediate(v) => v,
                            OperandValue::Pair(_, _) => todo!(),
                        };
                        let trait_ref =
                            if let ty::Dynamic(data, _, ty::DynStar) = cast.ty.kind() {
                                data.principal()
                            } else {
                                bug!("Only valid to do a DynStar cast into a DynStar type")
                            };
                        let vtable = get_vtable(bx.cx(), source.ty(self.mir, bx.tcx()), trait_ref);
                        let vtable = bx.pointercast(vtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
                        // FIXME(dyn-star): this is probably not the best way to check if this is
                        // a pointer, and really we should ensure that the value is a suitable
                        // pointer earlier in the compilation process.
                        let data = match operand.layout.pointee_info_at(bx.cx(), Size::ZERO) {
                            Some(_) => bx.ptrtoint(data, bx.cx().type_isize()),
                            None => data,
                        };
                        OperandValue::Pair(data, vtable)
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr

                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        let llval = operand.immediate();

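                        // Dispatch on the (source, target) `CastTy` classification
                        // to pick the appropriate backend cast instruction.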
                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(i), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, i.is_signed())
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(i), CastTy::Float) => {
                                if i.is_signed() {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(i), CastTy::Ptr(_)) => {
                                let usize_llval =
                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
                (bx, operand)
            }
            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

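    /// Returns the length of the array `place` as a backend `usize` value,
    /// reading it from the type when the place is an operand-backed local.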
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a `Local` is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`.
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

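    /// Codegens a binary operation on two scalar (immediate) values, selecting
    /// the float, signed-int, or unsigned-int flavor of each backend instruction
    /// based on `input_ty`.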
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

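    /// Codegens a comparison between two fat pointers: equality compares both
    /// the data pointer and the metadata, while the ordered comparisons are
    /// lexicographic over the (data pointer, metadata) pair.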
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

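    /// Codegens a checked binary operation, returning an `OperandValue::Pair`
    /// of the result and an overflow flag. When overflow checks are disabled,
    /// the flag is a constant `false`.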
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
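                // A shift overflows iff the shift amount has any bits set outside
                // the legal range `0..bit_width(lhs)`; `shift_mask_val(.., true)`
                // is the inverted mask covering exactly those illegal high bits.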
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
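    /// Returns `true` if `rvalue` can be codegen'd as an `OperandRef` (see
    /// `codegen_rvalue_operand`) rather than requiring a destination place.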
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}