use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc::mir;
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, HasTyCtxt, LayoutOf};
use rustc::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::sym;

use std::{i128, u128};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
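    /// Codegens `rvalue` and stores the result into the memory pointed to by `dest`.
    /// `Use`, unsizing coercions, `Repeat`, and `Aggregate` are handled in place; every
    /// other rvalue kind is codegen'd as an operand and then stored into `dest`.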
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack (fixable by
                // codegen'ing constants into `OperandValue::Ref`; it is unclear why we
                // don't do that yet).
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = base::from_immediate(&mut bx, v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

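    /// Codegens an assignment of an unsized rvalue into `indirect_dest`, a place that
    /// holds a pointer to the unsized data. Only `Rvalue::Use` can occur here; any
    /// other rvalue kind is a bug.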
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

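    /// Codegens `rvalue` into a fresh `OperandRef` instead of storing it into a place.
    /// Callers must first check `rvalue_creates_operand` for this rvalue.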
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match operand.layout.ty.kind {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
                                    bug!("reifying a fn ptr that requires const arguments");
                                }
                                OperandValue::Immediate(
                                    bx.get_fn_addr(
                                        ty::Instance::resolve_for_fn_ptr(
                                            bx.tcx(),
                                            ty::ParamEnv::reveal_all(),
                                            def_id,
                                            substs,
                                        )
                                        .unwrap(),
                                    ),
                                )
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match operand.layout.ty.kind {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(
                                    lldata,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(
                                    &mut bx,
                                    lldata,
                                    operand.layout.ty,
                                    cast.ty,
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast to a thin pointer: a fat-ptr to thin-ptr cast is an
                                // extraction of the data pointer followed by a pointer-cast
                                // to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Pointer(PointerCast::ArrayToPointer)
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            layout::Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_val = bx.cx().const_uint_big(ll_t_out, discr.val);
                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            layout::Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let layout::Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end() > scalar.valid_range.start()
                                {
                                    // We want `table[e as usize]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`.
                                    let ll_t_in_const =
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
                                    let cmp = bx.icmp(IntPredicate::IntULE, llval, ll_t_in_const);
                                    bx.assume(cmp);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, ref place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, ref place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability.into() })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(ref place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(*self.mir, bx.tcx());
                let discr = self
                    .codegen_place(&mut bx, &place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(&content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let call = bx.call(r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(*self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
                (bx, operand)
            }
        }
    }

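    /// Returns the length of the array behind `place` as a `usize` value. Locals that
    /// live as operands rather than in memory are handled by reading the length out of
    /// the array type itself; every other place is codegen'd and its stored length is
    /// used.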
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: &mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, &place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: &mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, &place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

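    /// Codegens a binary operation on two scalar immediates, picking the float,
    /// signed-integer, or unsigned-integer form of each operation based on `input_ty`.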
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

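    /// Codegens a comparison between two fat pointers: the data pointers are compared
    /// first and the extra (metadata) components break ties, i.e. the comparison is
    /// lexicographic. Only equality and ordering operators are supported here.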
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

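    /// Codegens a checked binary operation, producing an `OperandValue::Pair` of the
    /// result value and an overflow flag. Add/Sub/Mul use the overflow intrinsics;
    /// Shl/Shr flag overflow when the shift amount does not fit in the value's bit
    /// width. If overflow checks are disabled, the plain operation is emitted and the
    /// flag is constant `false`.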
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
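    /// Returns whether `rvalue` can be codegen'd directly as an operand, i.e. without a
    /// destination place. `Repeat` and `Aggregate` rvalues only qualify when the
    /// resulting type is a ZST.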
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(*self.mir, self.cx.tcx());
                let ty = self.monomorphize(&ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

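/// Codegens a cast of the float value `x` to the integer type `int_ty`. This emits a
/// plain `fpto[su]i` unless `-Z saturating-float-casts` is enabled, in which case the
/// result saturates at the bounds of `int_ty` and NaN is mapped to zero, as explained
/// in the comments below.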
fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
) -> Bx::Value {
    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };

    if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return fptosui_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal
    // to a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //  a) int_ty::MIN if x < f_min or x is NaN
    //  b) int_ty::MAX if x > f_max
    //  c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids an undef result because values in the range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    // Step 1 was already performed above.

    // Step 2: We use two comparisons and two selects, with %s1 being the result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
    // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
    // becomes int_ty::MIN if x is NaN.
    // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
    // negation, and the negation can be merged into the select. Therefore, it is not necessarily
    // any more expensive than an ordered ("normal") comparison. Whether these optimizations will
    // be performed is ultimately up to the backend, but at least x86 does perform them.
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead
        let zero = bx.cx().const_uint(int_ty, 0);
        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
        bx.select(cmp, s1, zero)
    } else {
        s1
    }
}