compiler/rustc_codegen_ssa/src/mir/rvalue.rs
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
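    /// Codegens `rvalue` and stores the result directly into the memory backing
    /// `dest`, which must already be allocated. Returns the (possibly advanced)
    /// builder.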
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack. (Fixable
                // by codegen'ing constants into `OperandValue::Ref`; is there a
                // reason we don't do that already?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

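                // For example (assuming the usual layouts), `[0u64; 16]` hits the
                // all-zero `memset` fast path below and `[0x2au8; 16]` hits the
                // byte-splat `memset` path; other repeats fall through to the
                // element-by-element loop at the end of this arm.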
                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

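                // Fall back to an explicit loop that stores `cg_elem` into each of
                // the `count` elements in turn.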
                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
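                // `active_field_index` is `Some` only for union aggregates, where
                // exactly one field is written; for all other aggregates the
                // operands are stored into the fields in order.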
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index as u64);
                            dest.project_index(&mut bx, llindex)
                        } else {
                            dest.project_field(&mut bx, field_index)
                        };
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

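    /// Codegens `rvalue` into the unsized place behind `indirect_dest`, which
    /// holds a pointer to the destination rather than the destination itself.
    /// Only `Rvalue::Use` can legally assign to an unsized place, so anything
    /// else is a bug.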
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

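    /// Codegens `rvalue` as an `OperandRef` (an immediate or a pair of
    /// immediates) instead of storing it into memory. Callers must ensure
    /// `rvalue_creates_operand` holds, which this asserts.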
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeAddress => {
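                        // Pointer-to-integer casts like `p as usize` expose the
                        // pointer's address and lower to a plain `ptrtoint`.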
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
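                        // e.g. `foo as fn(i32)`: resolve the zero-sized `FnDef`
                        // type to a concrete function address.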
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .expect("failed to normalize and resolve closure during codegen")
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
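                                // e.g. `&[u8; 4] as &[u8]`: the metadata (here
                                // the length 4) is synthesized from the types by
                                // `unsize_ptr` below.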
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Casting a fat pointer to a thin pointer extracts
                                // the data pointer and pointer-casts it to the
                                // desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc
                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        let llval = operand.immediate();

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(i), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, i.is_signed())
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(i), CastTy::Float) => {
                                if i.is_signed() {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(i), CastTy::Ptr(_)) => {
                                let usize_llval =
                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => {
                let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
                (bx, operand)
            }
            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

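    /// Returns the length of the array or slice in `place` as a `usize` value:
    /// a constant when the length is known from a fixed-size array type,
    /// otherwise read from the place's fat-pointer metadata.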
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

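    /// Codegens `op` applied to two scalar (immediate) operands, picking the
    /// float, signed, or unsigned flavor of each backend instruction based on
    /// `input_ty`.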
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

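    /// Codegens a comparison between two fat pointers, comparing
    /// lexicographically: the data pointers first, then (on equality) the
    /// metadata. Only equality and ordering operators are meaningful here.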
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
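                // e.g. for `Le`: `a <= b` becomes
                // `a.0 < b.0 || (a.0 == b.0 && a.1 <= b.1)` (unsigned compares).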
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

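    /// Codegens a checked binary operation, returning an `OperandValue::Pair`
    /// of the wrapping result and an overflow flag (the flag is constant
    /// `false` when overflow checks are disabled).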
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

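                // `outer_bits` is nonzero exactly when the shift amount has bits
                // set outside the valid range of shift amounts, i.e. when the
                // shift would overflow.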
                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
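    /// Returns whether `rvalue` can be codegenned as an `OperandRef` rather
    /// than into a backing allocation; `Repeat` and `Aggregate` qualify only
    /// when the resulting type is a ZST.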
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}