// compiler/rustc_codegen_ssa/src/mir/rvalue.rs
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
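    /// Generates code for `rvalue` and stores the result into the (sized) destination
    /// place `dest`, threading the builder through and returning it.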
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index as u64);
                            dest.project_index(&mut bx, llindex)
                        } else {
                            dest.project_field(&mut bx, field_index)
                        };
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

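    /// Generates code for an `Rvalue::Use` of unsized type, storing the operand through the
    /// pointer held in `indirect_dest`. Any other rvalue kind here is a bug.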
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

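    /// Generates code for `rvalue` and returns the result as an `OperandRef`, without writing
    /// to a destination place. Only valid for rvalues for which `rvalue_creates_operand`
    /// returns `true`.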
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .expect("failed to normalize and resolve closure during codegen")
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast of a fat pointer to a thin pointer: extract the data
                                // pointer and pointer-cast it to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::DynStar => {
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Ref(_, _, _) => todo!(),
                            OperandValue::Immediate(v) => (v, None),
                            OperandValue::Pair(v, l) => (v, Some(l)),
                        };
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr

                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        let llval = operand.immediate();

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(i), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, i.is_signed())
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(i), CastTy::Float) => {
                                if i.is_signed() {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(i), CastTy::Ptr(_)) => {
                                let usize_llval =
                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
                (bx, operand)
            }
            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

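    /// Returns the length of the array behind `place` as a `usize` value: read from the array
    /// type when the place is a local held as an operand (where `codegen_place` would panic),
    /// and via `PlaceRef::len` otherwise.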
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

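    /// Generates a single binary operation on two scalar immediates, picking the float,
    /// signed, or unsigned variant of each instruction based on `input_ty`.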
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

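    /// Generates a comparison of two fat pointers. Equality and inequality compare both
    /// components; the orderings compare the data pointers first and fall back to the
    /// metadata only when the data pointers are equal (an unsigned lexicographic comparison).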
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

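    /// Generates an overflow-checked binary operation, returning an `OperandValue::Pair` of
    /// the result and a `bool` overflow flag. Add/sub/mul go through the checked-arithmetic
    /// intrinsics; shifts instead flag overflow when the RHS has bits set outside the shift
    /// mask. When overflow checks are disabled the flag is a constant `false`.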
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
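    /// Returns whether `rvalue` can be codegenned directly to an `OperandRef` (see
    /// `codegen_rvalue_operand`) instead of requiring a destination place. `Repeat` and
    /// `Aggregate` rvalues qualify only when their type is a ZST.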
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}