// compiler/rustc_codegen_ssa/src/mir/rvalue.rs
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
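    /// Codegen `rvalue` and store the result into the `dest` place.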
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                // FIXME: consider not copying constants through the stack. (This is
                // fixable by codegen'ing constants into `OperandValue::Ref`; it is
                // not clear why we don't already do that.)
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return;
                }

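                // e.g. `[0u8; 1024]` can be lowered below to a single `memset` of
                // 1024 zero bytes instead of a 1024-iteration store loop.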
                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

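            // e.g. `_1 = Foo { a: move _2, b: move _3 }`: select the variant (for
            // enums), then store each operand into the corresponding field in place.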
            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index as u64);
                            dest.project_index(bx, llindex)
                        } else {
                            dest.project_field(bx, field_index)
                        };
                        op.val.store(bx, field);
                    }
                }
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

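    /// Codegen `rvalue` into the unsized place behind `indirect_dest`. Only
    /// `Rvalue::Use` can legally produce an unsized value here; anything else
    /// is a bug.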
    pub fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

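    /// Codegen `rvalue` directly to an `OperandRef`, without storing it in a
    /// place. Callers must ensure `rvalue_creates_operand` holds; this is
    /// asserted below.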
    pub fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
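                    // e.g. `ptr as usize`: exposing a pointer's address is a plain
                    // `ptrtoint` at the backend level.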
                    mir::CastKind::PointerExposeAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .expect("failed to normalize and resolve closure during codegen")
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
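                    // e.g. coercing `&[T; N]` to `&[T]` or `&T` to `&dyn Trait`:
                    // the result is always a (data pointer, metadata) scalar pair.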
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast of a fat pointer to a thin pointer: extract
                                // the data pointer and pointer-cast it to the
                                // desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::DynStar => {
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Ref(_, _, _) => todo!(),
                            OperandValue::Immediate(v) => (v, None),
                            OperandValue::Pair(v, l) => (v, Some(l)),
                        };
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr

                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return OperandRef { val, layout: cast };
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        let llval = operand.immediate();

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(i), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, i.is_signed())
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(i), CastTy::Float) => {
                                if i.is_signed() {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(i), CastTy::Ptr(_)) => {
                                let usize_llval =
                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }
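            // Checked ops return a `(result, overflowed)` pair, so the operand's
            // type is the corresponding `(T, bool)` tuple.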
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(tcx.types.usize),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
            }
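            // `ShallowInitBox` turns a raw pointer into a `Box` without touching
            // the pointee: only the pointer value itself is cast.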
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
        }
    }

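    /// Returns the length of the array behind `place` as a `usize` value.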
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a `Local` is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

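    /// Codegen a binary operation on two scalar immediates, dispatching on
    /// float vs. integer operands and, for integers, on signedness.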
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

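    /// Codegen a comparison of two fat (wide) pointers: the pairs are compared
    /// lexicographically, the data pointer first and the metadata second.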
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

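    /// Codegen a checked binary operation, yielding an `(operation result,
    /// overflow flag)` pair.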
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
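                // A shift overflows iff the shift amount has bits set outside
                // `0..bit_width(lhs)`; `invert_mask` selects exactly those bits.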
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
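    /// Returns `true` if `rvalue` can be codegen'd directly as an operand,
    /// i.e. without being materialized in a place. `Repeat` and `Aggregate`
    /// rvalues qualify only when their type is a ZST.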
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}