// compiler/rustc_codegen_ssa/src/mir/rvalue.rs
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack.
                // (Fixable by codegen'ing constants into `OperandValue::Ref`;
                // it is not clear why we don't do that already.)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer this to be
                // eliminated during MIR building, but `CoerceUnsized` can be
                // passed by a where-clause, so the (generic) MIR may not be
                // able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }
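
            // For illustration (not part of the upstream source): the
            // scalar-pair fast path above covers ordinary unsizing coercions,
            //
            //     let s: &[u8] = &[1u8, 2, 3]; // thin `&[u8; 3]` -> fat `&[u8]`
            //
            // while `coerce_unsized_into` handles containers whose layout is
            // not a scalar pair, e.g. this nightly-only, hypothetical type:
            //
            //     #![feature(coerce_unsized, unsize)]
            //     use std::{marker::Unsize, ops::CoerceUnsized};
            //     struct Holder<T: ?Sized> { tag: u32, ptr: Box<T> }
            //     impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Holder<U>>
            //         for Holder<T> {}
            //     let h: Holder<[u8]> = Holder { tag: 0, ptr: Box::new([1u8, 2, 3]) };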

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }
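
            // For illustration (not part of the upstream source): which of
            // the three paths above a `Repeat` rvalue takes:
            //
            //     let zeroed = [0u64; 512];   // all-zero: a single memset(0)
            //     let filled = [0x2au8; 512]; // byte element: memset(0x2a)
            //     let looped = [7u32; 512];   // general case: the store loop
            //                                 // from `write_operand_repeatedly`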

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }
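
            // For illustration (not part of the upstream source):
            // `active_field_index` is `Some(_)` only for union literals, where
            // a single field is written and there is no discriminant to set:
            //
            //     union U { a: u32, b: f32 }
            //     let u = U { b: 1.0 };  // store through field index 1 only
            //
            //     enum E { A(u8), B }
            //     let e = E::A(7);       // set the discriminant for `A`, then
            //                            // store the payload into the downcast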

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }
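
    // For illustration (not part of the upstream source): unsized assignments
    // reach `codegen_rvalue_unsized` only via `Rvalue::Use`, e.g. under the
    // unstable unsized-fn-params feature, where the local is indirect:
    //
    //     #![feature(unsized_fn_params)]
    //     fn take(s: str) { /* `s` is an unsized, indirect local */ }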

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast to thin-ptr: casting a fat pointer to a
                                // thin pointer extracts the data pointer and
                                // pointer-casts it to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                if !scalar.is_always_valid(bx.cx())
                                    && scalar.valid_range.end >= scalar.valid_range.start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
                                    if scalar.valid_range.start > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, scalar.valid_range.start);
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }
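
                        // For illustration (not part of the upstream source):
                        // given
                        //
                        //     enum E { A = 1, B = 2, C = 3 }
                        //     let i = e as usize;
                        //
                        // the discriminant's valid range is `1..=3`, so the
                        // `assume`s above tell LLVM that `1 <= i && i <= 3`,
                        // letting it elide the bounds check in `table[i - 1]`.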

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }
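
            // For illustration (not part of the upstream source): how a few
            // source-level casts map onto the arms above:
            //
            //     let f = str::len as fn(&str) -> usize; // ReifyFnPointer
            //     let d = &1u8 as &dyn std::fmt::Debug;  // Unsize -> fat pointer
            //     let n = 3.7f64 as u32;                 // Misc: cast_float_to_int
            //     let a = &1u8 as *const u8 as usize;    // Misc: ptrtoint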

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
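
            // For illustration (not part of the upstream source): the only
            // aggregates that reach this arm are zero-sized, e.g.
            //
            //     struct Zst;
            //     let pair = ((), Zst); // no stores; a ZST operand suffices
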
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`.
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }
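
    // For illustration (not part of the upstream source): taking a reference
    // is therefore free at this level; the place's address (plus metadata, if
    // any) simply becomes the operand value:
    //
    //     let xs = [1u8, 2, 3];
    //     let thin: &u8 = &xs[0];   // Immediate(address)
    //     let fat: &[u8] = &xs[..]; // Pair(address, length)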

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }
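
    // Worked example (not part of the upstream source): for `Le` the table
    // above picks (op, strict_op) = (IntULE, IntULT), so `a <= b` on two fat
    // pointers lowers to the lexicographic test
    //
    //     (a.0 < b.0) | ((a.0 == b.0) & (a.1 <= b.1))
    //
    // exercised e.g. by comparing raw slice pointers:
    //
    //     let xs = [1u8, 2, 3];
    //     let a: *const [u8] = &xs[..2];
    //     let b: *const [u8] = &xs[..3];
    //     assert!(a <= b); // equal addresses; metadata 2 <= 3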

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
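
    // For illustration (not part of the upstream source): a checked add
    // returns the same (value, overflowed) pair that the surface API exposes:
    //
    //     let (sum, overflowed) = i32::MAX.overflowing_add(1);
    //     assert!(overflowed && sum == i32::MIN);
    //
    // For `Shl`/`Shr`, overflow is instead detected by masking: any shift
    // amount bits outside `0..bit_width(lhs)` set the overflow flag.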
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}