compiler/rustc_codegen_ssa/src/mir/rvalue.rs (rust.git, commit "Introduce NullOp::AlignOf")
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
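    /// Codegen `rvalue` and store the result into the (sized) destination place `dest`.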
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

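    /// Codegen `rvalue` into an unsized destination. Only `Rvalue::Use` can produce an
    /// unsized value here; anything else is a bug.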
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

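    /// Codegen `rvalue` as an `OperandRef` rather than storing it into a place. This is
    /// only valid for rvalues for which `rvalue_creates_operand` returns `true`.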
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end >= scalar.valid_range.start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
                                    if scalar.valid_range.start > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, scalar.valid_range.start);
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let ty = bx.type_func(&[bx.type_isize(), bx.type_isize()], bx.type_i8p());
                let call = bx.call(ty, r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }

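            // `SizeOf` and `AlignOf` produce `usize` constants computed from the
            // monomorphized type's layout; `NullOp::Box` is handled by the arm above.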
            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                    mir::NullOp::Box => unreachable!(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
        }
    }

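    /// Returns the length (number of elements) of the array at `place` as a `usize` value.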
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

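    /// Codegen a binary operation whose operands are scalar immediates.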
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

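    /// Codegen a comparison between two fat pointers, each given as a (data, extra) pair.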
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

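    /// Codegen a checked binary operation, producing a pair of the result value and an
    /// overflow flag.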
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
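    /// Returns `true` if `rvalue` can be codegen'd directly as an operand, i.e., without
    /// needing a destination place (see `codegen_rvalue_operand`).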
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

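// Cast the float value `x` to `int_ty`. Unless `-Zsaturating-float-casts` is explicitly
// disabled, the cast saturates on overflow and maps NaN to zero, either via a native
// saturating cast or the manual clamp-and-select sequence described in the comments below.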
fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //  a) int_ty::MIN if x < f_min or x is NaN
    //  b) int_ty::MAX if x > f_max
    //  c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids an undef result because values in range [f_min, f_max] by definition fit into the
    // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
    // undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    // Step 1 ...
    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

    // Step 2: We use two comparisons and two selects, with %s1 being the
    // result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This
    // comparison is true if the operands are not comparable (i.e., if x is
    // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
    // x is NaN.
    //
    // Performance note: Unordered comparison can be lowered to a "flipped"
    // comparison and a negation, and the negation can be merged into the
    // select. Therefore, it is not necessarily any more expensive than an
    // ordered ("normal") comparison. Whether these optimizations will be
    // performed is ultimately up to the backend, but at least x86 does
    // perform them.
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead
        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
        bx.select(cmp, s1, zero)
    } else {
        s1
    }
}