src/librustc_codegen_llvm/mir/rvalue.rs (rust.git, commit 3c469cffaf3cb1103eda7dcf256f3b8918f8f47e)
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use rustc::ty::{self, Ty};
12 use rustc::ty::cast::{CastTy, IntTy};
13 use rustc::ty::layout::{self, LayoutOf};
14 use rustc::mir;
15 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
16 use rustc_apfloat::{ieee, Float, Status, Round};
17 use std::{u128, i128};
18
19 use base;
20 use builder::Builder;
21 use callee;
22 use common::{self, IntPredicate, RealPredicate};
23 use context::CodegenCx;
24 use consts;
25 use monomorphize;
26 use type_::Type;
27 use type_of::LayoutLlvmExt;
28 use value::Value;
29
30 use interfaces::{BuilderMethods, CommonMethods, CommonWriteMethods};
31
32 use super::{FunctionCx, LocalRef};
33 use super::operand::{OperandRef, OperandValue};
34 use super::place::PlaceRef;
35
36 impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
37     pub fn codegen_rvalue(&mut self,
38                         bx: Builder<'a, 'll, 'tcx>,
39                         dest: PlaceRef<'tcx, &'ll Value>,
40                         rvalue: &mir::Rvalue<'tcx>)
41                         -> Builder<'a, 'll, 'tcx>
42     {
43         debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
44                dest.llval, rvalue);
45
46         match *rvalue {
47             mir::Rvalue::Use(ref operand) => {
48                 let cg_operand = self.codegen_operand(&bx, operand);
49                 // FIXME: consider not copying constants through the stack (fixable by codegenning
50                 // constants directly into OperandValue::Ref; it is unclear why we don't do that yet).
51                 cg_operand.val.store(&bx, dest);
52                 bx
53             }
54
55             mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
56                 // The destination necessarily contains a fat pointer, so if
57                 // it's a scalar pair, it's a fat pointer or newtype thereof.
58                 if dest.layout.is_llvm_scalar_pair() {
59                     // into-coerce of a thin pointer to a fat pointer - just
60                     // use the operand path.
61                     let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
62                     temp.val.store(&bx, dest);
63                     return bx;
64                 }
65
66                 // Unsize of a nontrivial struct. I would prefer for
67                 // this to be eliminated by MIR building, but
68                 // `CoerceUnsized` can be provided by a where-clause,
69                 // so the (generic) MIR may not be able to expand it.
70                 let operand = self.codegen_operand(&bx, source);
71                 match operand.val {
72                     OperandValue::Pair(..) |
73                     OperandValue::Immediate(_) => {
74                         // unsize from an immediate structure. We don't
75                         // really need a temporary alloca here, but
76                         // avoiding it would require us to have
77                         // `coerce_unsized_into` use extractvalue to
78                         // index into the struct, and this case isn't
79                         // important enough for it.
80                         debug!("codegen_rvalue: creating ugly alloca");
81                         let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
82                         scratch.storage_live(&bx);
83                         operand.val.store(&bx, scratch);
84                         base::coerce_unsized_into(&bx, scratch, dest);
85                         scratch.storage_dead(&bx);
86                     }
87                     OperandValue::Ref(llref, None, align) => {
88                         let source = PlaceRef::new_sized(llref, operand.layout, align);
89                         base::coerce_unsized_into(&bx, source, dest);
90                     }
91                     OperandValue::Ref(_, Some(_), _) => {
92                         bug!("unsized coercion on an unsized rvalue")
93                     }
94                 }
95                 bx
96             }
97
98             mir::Rvalue::Repeat(ref elem, count) => {
99                 let cg_elem = self.codegen_operand(&bx, elem);
100
101                 // Do not generate the loop for zero-sized elements or empty arrays.
102                 if dest.layout.is_zst() {
103                     return bx;
104                 }
105
106                 let start = dest.project_index(&bx, bx.cx().c_usize(0)).llval;
107
108                 if let OperandValue::Immediate(v) = cg_elem.val {
109                     let align = bx.cx().c_i32(dest.align.abi() as i32);
110                     let size = bx.cx().c_usize(dest.layout.size.bytes());
111
112                     // Use llvm.memset.p0i8.* to initialize all-zero arrays
113                     if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 {
114                         let fill = bx.cx().c_u8(0);
115                         base::call_memset(&bx, start, fill, size, align, false);
116                         return bx;
117                     }
118
119                     // Use llvm.memset.p0i8.* to initialize byte arrays
120                     let v = base::from_immediate(&bx, v);
121                     if bx.cx().val_ty(v) == Type::i8(bx.cx()) {
122                         base::call_memset(&bx, start, v, size, align, false);
123                         return bx;
124                     }
125                 }
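                // A minimal sketch of which source forms take each path here (assumed,
                // illustrative examples, not taken from this function's callers):
                //
                //     let zeros = [0u64; 1024]; // all-zero element: lowered to a memset of 0
                //     let bytes = [7u8; 1024];  // byte-sized element: memset of that byte value
                //     let words = [1u32; 1024]; // anything else: the explicit loop built below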
126
127                 let count = bx.cx().c_usize(count);
128                 let end = dest.project_index(&bx, count).llval;
129
130                 let header_bx = bx.build_sibling_block("repeat_loop_header");
131                 let body_bx = bx.build_sibling_block("repeat_loop_body");
132                 let next_bx = bx.build_sibling_block("repeat_loop_next");
133
134                 bx.br(header_bx.llbb());
135                 let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]);
136
137                 let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
138                 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
139
140                 cg_elem.val.store(&body_bx,
141                     PlaceRef::new_sized(current, cg_elem.layout, dest.align));
142
143                 let next = body_bx.inbounds_gep(current, &[bx.cx().c_usize(1)]);
144                 body_bx.br(header_bx.llbb());
145                 header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
146
147                 next_bx
148             }
149
150             mir::Rvalue::Aggregate(ref kind, ref operands) => {
151                 let (dest, active_field_index) = match **kind {
152                     mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
153                         dest.codegen_set_discr(&bx, variant_index);
154                         if adt_def.is_enum() {
155                             (dest.project_downcast(&bx, variant_index), active_field_index)
156                         } else {
157                             (dest, active_field_index)
158                         }
159                     }
160                     _ => (dest, None)
161                 };
162                 for (i, operand) in operands.iter().enumerate() {
163                     let op = self.codegen_operand(&bx, operand);
164                     // Do not generate stores and GEPs for zero-sized fields.
165                     if !op.layout.is_zst() {
166                         let field_index = active_field_index.unwrap_or(i);
167                         op.val.store(&bx, dest.project_field(&bx, field_index));
168                     }
169                 }
170                 bx
171             }
172
173             _ => {
174                 assert!(self.rvalue_creates_operand(rvalue));
175                 let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
176                 temp.val.store(&bx, dest);
177                 bx
178             }
179         }
180     }
181
182     pub fn codegen_rvalue_unsized(&mut self,
183                         bx: Builder<'a, 'll, 'tcx>,
184                         indirect_dest: PlaceRef<'tcx, &'ll Value>,
185                         rvalue: &mir::Rvalue<'tcx>)
186                         -> Builder<'a, 'll, 'tcx>
187     {
188         debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
189                indirect_dest.llval, rvalue);
190
191         match *rvalue {
192             mir::Rvalue::Use(ref operand) => {
193                 let cg_operand = self.codegen_operand(&bx, operand);
194                 cg_operand.val.store_unsized(&bx, indirect_dest);
195                 bx
196             }
197
198             _ => bug!("unsized assignment other than Rvalue::Use"),
199         }
200     }
201
202     pub fn codegen_rvalue_operand(
203         &mut self,
204         bx: Builder<'a, 'll, 'tcx>,
205         rvalue: &mir::Rvalue<'tcx>
206     ) -> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx, &'ll Value>) {
207         assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
208
209         match *rvalue {
210             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
211                 let operand = self.codegen_operand(&bx, source);
212                 debug!("cast operand is {:?}", operand);
213                 let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
214
215                 let val = match *kind {
216                     mir::CastKind::ReifyFnPointer => {
217                         match operand.layout.ty.sty {
218                             ty::FnDef(def_id, substs) => {
219                                 if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") {
220                                     bug!("reifying a fn ptr that requires \
221                                           const arguments");
222                                 }
223                                 OperandValue::Immediate(
224                                     callee::resolve_and_get_fn(bx.cx(), def_id, substs))
225                             }
226                             _ => {
227                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
228                             }
229                         }
230                     }
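                    // A sketch of the surface form this handles (assumed example):
                    //
                    //     fn double(x: u32) -> u32 { x * 2 }
                    //     let f: fn(u32) -> u32 = double; // zero-sized FnDef reified to a fn pointer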
231                     mir::CastKind::ClosureFnPointer => {
232                         match operand.layout.ty.sty {
233                             ty::Closure(def_id, substs) => {
234                                 let instance = monomorphize::resolve_closure(
235                                     bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce);
236                                 OperandValue::Immediate(callee::get_fn(bx.cx(), instance))
237                             }
238                             _ => {
239                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
240                             }
241                         }
242                     }
243                     mir::CastKind::UnsafeFnPointer => {
244                         // this is a no-op at the LLVM level
245                         operand.val
246                     }
247                     mir::CastKind::Unsize => {
248                         assert!(cast.is_llvm_scalar_pair());
249                         match operand.val {
250                             OperandValue::Pair(lldata, llextra) => {
251                                 // unsize from a fat pointer - this is a
252                                 // "trait-object-to-supertrait" coercion, for
253                                 // example,
254                                 //   &'a fmt::Debug+Send => &'a fmt::Debug,
255
256                                 // HACK(eddyb) have to bitcast pointers
257                                 // until LLVM removes pointee types.
258                                 let lldata = bx.pointercast(lldata,
259                                     cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
260                                 OperandValue::Pair(lldata, llextra)
261                             }
262                             OperandValue::Immediate(lldata) => {
263                                 // "standard" unsize
264                                 let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
265                                     operand.layout.ty, cast.ty);
266                                 OperandValue::Pair(lldata, llextra)
267                             }
268                             OperandValue::Ref(..) => {
269                                 bug!("by-ref operand {:?} in codegen_rvalue_operand",
270                                      operand);
271                             }
272                         }
273                     }
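                    // A sketch of the two unsize cases above (assumed examples):
                    //
                    //     let x = 0i32;
                    //     let thin: &dyn std::fmt::Debug = &x;           // Immediate: pointer gains a vtable
                    //     let fat: &(dyn std::fmt::Debug + Send) = &x;
                    //     let again: &dyn std::fmt::Debug = fat;         // Pair: data ptr bitcast, vtable kept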
274                     mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
275                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
276                             if cast.is_llvm_scalar_pair() {
277                                 let data_cast = bx.pointercast(data_ptr,
278                                     cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
279                                 OperandValue::Pair(data_cast, meta)
280                             } else { // cast to thin-ptr
281                                 // Casting a fat-ptr to a thin-ptr extracts the data pointer and
282                                 // pointer-casts it to the desired pointer type.
283                                 let llcast_ty = cast.immediate_llvm_type(bx.cx());
284                                 let llval = bx.pointercast(data_ptr, llcast_ty);
285                                 OperandValue::Immediate(llval)
286                             }
287                         } else {
288                             bug!("Unexpected non-Pair operand")
289                         }
290                     }
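                    // A sketch of the fat-to-thin case above (assumed example):
                    //
                    //     let p: *const [u8] = &[1, 2, 3][..];
                    //     let q = p as *const u8; // keeps only the data pointer, drops the length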
291                     mir::CastKind::Misc => {
292                         assert!(cast.is_llvm_immediate());
293                         let ll_t_out = cast.immediate_llvm_type(bx.cx());
294                         if operand.layout.abi.is_uninhabited() {
295                             let val = OperandValue::Immediate(bx.cx().c_undef(ll_t_out));
296                             return (bx, OperandRef {
297                                 val,
298                                 layout: cast,
299                             });
300                         }
301                         let r_t_in = CastTy::from_ty(operand.layout.ty)
302                             .expect("bad input type for cast");
303                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
304                         let ll_t_in = operand.layout.immediate_llvm_type(bx.cx());
305                         match operand.layout.variants {
306                             layout::Variants::Single { index } => {
307                                 if let Some(def) = operand.layout.ty.ty_adt_def() {
308                                     let discr_val = def
309                                         .discriminant_for_variant(bx.cx().tcx, index)
310                                         .val;
311                                     let discr = bx.cx().c_uint_big(ll_t_out, discr_val);
312                                     return (bx, OperandRef {
313                                         val: OperandValue::Immediate(discr),
314                                         layout: cast,
315                                     });
316                                 }
317                             }
318                             layout::Variants::Tagged { .. } |
319                             layout::Variants::NicheFilling { .. } => {},
320                         }
321                         let llval = operand.immediate();
322
323                         let mut signed = false;
324                         if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
325                             if let layout::Int(_, s) = scalar.value {
326                                 // We use `i1` for bytes that are always `0` or `1`,
327                                 // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
328                                 // let LLVM interpret the `i1` as signed, because
329                                 // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
330                                 signed = !scalar.is_bool() && s;
331
332                                 let er = scalar.valid_range_exclusive(bx.cx());
333                                 if er.end != er.start &&
334                                    scalar.valid_range.end() > scalar.valid_range.start() {
335                                     // We want `table[e as usize]` to not
336                                     // have bounds checks, and this is the most
337                                     // convenient place to put the `assume`.
338
339                                     base::call_assume(&bx, bx.icmp(
340                                         IntPredicate::IntULE,
341                                         llval,
342                                         bx.cx().c_uint_big(ll_t_in, *scalar.valid_range.end())
343                                     ));
344                                 }
345                             }
346                         }
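                        // A sketch of the pattern the `assume` above is aimed at (assumed example):
                        //
                        //     #[repr(u8)] enum E { A, B, C }
                        //     fn get(table: &[u32; 3], e: E) -> u32 {
                        //         table[e as usize] // assume(e as u8 <= 2) lets LLVM drop the bounds check
                        //     }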
347
348                         let newval = match (r_t_in, r_t_out) {
349                             (CastTy::Int(_), CastTy::Int(_)) => {
350                                 bx.intcast(llval, ll_t_out, signed)
351                             }
352                             (CastTy::Float, CastTy::Float) => {
353                                 let srcsz = ll_t_in.float_width();
354                                 let dstsz = ll_t_out.float_width();
355                                 if dstsz > srcsz {
356                                     bx.fpext(llval, ll_t_out)
357                                 } else if srcsz > dstsz {
358                                     bx.fptrunc(llval, ll_t_out)
359                                 } else {
360                                     llval
361                                 }
362                             }
363                             (CastTy::Ptr(_), CastTy::Ptr(_)) |
364                             (CastTy::FnPtr, CastTy::Ptr(_)) |
365                             (CastTy::RPtr(_), CastTy::Ptr(_)) =>
366                                 bx.pointercast(llval, ll_t_out),
367                             (CastTy::Ptr(_), CastTy::Int(_)) |
368                             (CastTy::FnPtr, CastTy::Int(_)) =>
369                                 bx.ptrtoint(llval, ll_t_out),
370                             (CastTy::Int(_), CastTy::Ptr(_)) => {
371                                 let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed);
372                                 bx.inttoptr(usize_llval, ll_t_out)
373                             }
374                             (CastTy::Int(_), CastTy::Float) =>
375                                 cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
376                             (CastTy::Float, CastTy::Int(IntTy::I)) =>
377                                 cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
378                             (CastTy::Float, CastTy::Int(_)) =>
379                                 cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
380                             _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
381                         };
382                         OperandValue::Immediate(newval)
383                     }
384                 };
385                 (bx, OperandRef {
386                     val,
387                     layout: cast
388                 })
389             }
390
391             mir::Rvalue::Ref(_, bk, ref place) => {
392                 let cg_place = self.codegen_place(&bx, place);
393
394                 let ty = cg_place.layout.ty;
395
396                 // Note: places are indirect, so storing the `llval` into the
397                 // destination effectively creates a reference.
398                 let val = if !bx.cx().type_has_metadata(ty) {
399                     OperandValue::Immediate(cg_place.llval)
400                 } else {
401                     OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
402                 };
403                 (bx, OperandRef {
404                     val,
405                     layout: self.cx.layout_of(self.cx.tcx.mk_ref(
406                         self.cx.tcx.types.re_erased,
407                         ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
408                     )),
409                 })
410             }
411
412             mir::Rvalue::Len(ref place) => {
413                 let size = self.evaluate_array_len(&bx, place);
414                 let operand = OperandRef {
415                     val: OperandValue::Immediate(size),
416                     layout: bx.cx().layout_of(bx.tcx().types.usize),
417                 };
418                 (bx, operand)
419             }
420
421             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
422                 let lhs = self.codegen_operand(&bx, lhs);
423                 let rhs = self.codegen_operand(&bx, rhs);
424                 let llresult = match (lhs.val, rhs.val) {
425                     (OperandValue::Pair(lhs_addr, lhs_extra),
426                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
427                         self.codegen_fat_ptr_binop(&bx, op,
428                                                  lhs_addr, lhs_extra,
429                                                  rhs_addr, rhs_extra,
430                                                  lhs.layout.ty)
431                     }
432
433                     (OperandValue::Immediate(lhs_val),
434                      OperandValue::Immediate(rhs_val)) => {
435                         self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
436                     }
437
438                     _ => bug!()
439                 };
440                 let operand = OperandRef {
441                     val: OperandValue::Immediate(llresult),
442                     layout: bx.cx().layout_of(
443                         op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
444                 };
445                 (bx, operand)
446             }
447             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
448                 let lhs = self.codegen_operand(&bx, lhs);
449                 let rhs = self.codegen_operand(&bx, rhs);
450                 let result = self.codegen_scalar_checked_binop(&bx, op,
451                                                              lhs.immediate(), rhs.immediate(),
452                                                              lhs.layout.ty);
453                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
454                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
455                 let operand = OperandRef {
456                     val: result,
457                     layout: bx.cx().layout_of(operand_ty)
458                 };
459
460                 (bx, operand)
461             }
462
463             mir::Rvalue::UnaryOp(op, ref operand) => {
464                 let operand = self.codegen_operand(&bx, operand);
465                 let lloperand = operand.immediate();
466                 let is_float = operand.layout.ty.is_fp();
467                 let llval = match op {
468                     mir::UnOp::Not => bx.not(lloperand),
469                     mir::UnOp::Neg => if is_float {
470                         bx.fneg(lloperand)
471                     } else {
472                         bx.neg(lloperand)
473                     }
474                 };
475                 (bx, OperandRef {
476                     val: OperandValue::Immediate(llval),
477                     layout: operand.layout,
478                 })
479             }
480
481             mir::Rvalue::Discriminant(ref place) => {
482                 let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
483                 let discr = self.codegen_place(&bx, place)
484                     .codegen_get_discr(&bx, discr_ty);
485                 (bx, OperandRef {
486                     val: OperandValue::Immediate(discr),
487                     layout: self.cx.layout_of(discr_ty)
488                 })
489             }
490
491             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
492                 assert!(bx.cx().type_is_sized(ty));
493                 let val = bx.cx().c_usize(bx.cx().size_of(ty).bytes());
494                 let tcx = bx.tcx();
495                 (bx, OperandRef {
496                     val: OperandValue::Immediate(val),
497                     layout: self.cx.layout_of(tcx.types.usize),
498                 })
499             }
500
501             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
502                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
503                 let (size, align) = bx.cx().size_and_align_of(content_ty);
504                 let llsize = bx.cx().c_usize(size.bytes());
505                 let llalign = bx.cx().c_usize(align.abi());
506                 let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
507                 let llty_ptr = box_layout.llvm_type(bx.cx());
508
509                 // Allocate space:
510                 let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
511                     Ok(id) => id,
512                     Err(s) => {
513                         bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
514                     }
515                 };
516                 let instance = ty::Instance::mono(bx.tcx(), def_id);
517                 let r = callee::get_fn(bx.cx(), instance);
518                 let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
519
520                 let operand = OperandRef {
521                     val: OperandValue::Immediate(val),
522                     layout: box_layout,
523                 };
524                 (bx, operand)
525             }
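            // A sketch of the construct this lowers (assumed example, nightly `box` syntax):
            //
            //     #![feature(box_syntax)]
            //     let b = box 5i32; // NullOp::Box calls exchange_malloc(4, 4); the `5` is
            //                       // then written through the returned pointer by later MIR.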
526             mir::Rvalue::Use(ref operand) => {
527                 let operand = self.codegen_operand(&bx, operand);
528                 (bx, operand)
529             }
530             mir::Rvalue::Repeat(..) |
531             mir::Rvalue::Aggregate(..) => {
532                 // According to `rvalue_creates_operand`, only ZST
533                 // aggregate rvalues are allowed to be operands.
534                 let ty = rvalue.ty(self.mir, self.cx.tcx);
535                 (bx, OperandRef::new_zst(self.cx,
536                     self.cx.layout_of(self.monomorphize(&ty))))
537             }
538         }
539     }
540
541     fn evaluate_array_len(
542         &mut self,
543         bx: &Builder<'a, 'll, 'tcx>,
544         place: &mir::Place<'tcx>,
545     ) -> &'ll Value {
546         // ZSTs are passed as operands and require special handling
547         // because codegen_place() panics if a Local is an operand.
548         if let mir::Place::Local(index) = *place {
549             if let LocalRef::Operand(Some(op)) = self.locals[index] {
550                 if let ty::Array(_, n) = op.layout.ty.sty {
551                     let n = n.unwrap_usize(bx.cx().tcx);
552                     return bx.cx().c_usize(n);
553                 }
554             }
555         }
556         // Use the common size calculation for non-zero-sized types.
557         let cg_value = self.codegen_place(&bx, place);
558         return cg_value.len(bx.cx());
559     }
560
561     pub fn codegen_scalar_binop(
562         &mut self,
563         bx: &Builder<'a, 'll, 'tcx>,
564         op: mir::BinOp,
565         lhs: &'ll Value,
566         rhs: &'ll Value,
567         input_ty: Ty<'tcx>,
568     ) -> &'ll Value {
569         let is_float = input_ty.is_fp();
570         let is_signed = input_ty.is_signed();
571         let is_unit = input_ty.is_unit();
572         match op {
573             mir::BinOp::Add => if is_float {
574                 bx.fadd(lhs, rhs)
575             } else {
576                 bx.add(lhs, rhs)
577             },
578             mir::BinOp::Sub => if is_float {
579                 bx.fsub(lhs, rhs)
580             } else {
581                 bx.sub(lhs, rhs)
582             },
583             mir::BinOp::Mul => if is_float {
584                 bx.fmul(lhs, rhs)
585             } else {
586                 bx.mul(lhs, rhs)
587             },
588             mir::BinOp::Div => if is_float {
589                 bx.fdiv(lhs, rhs)
590             } else if is_signed {
591                 bx.sdiv(lhs, rhs)
592             } else {
593                 bx.udiv(lhs, rhs)
594             },
595             mir::BinOp::Rem => if is_float {
596                 bx.frem(lhs, rhs)
597             } else if is_signed {
598                 bx.srem(lhs, rhs)
599             } else {
600                 bx.urem(lhs, rhs)
601             },
602             mir::BinOp::BitOr => bx.or(lhs, rhs),
603             mir::BinOp::BitAnd => bx.and(lhs, rhs),
604             mir::BinOp::BitXor => bx.xor(lhs, rhs),
605             mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
606             mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
607             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
608             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
609             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
610                 bx.cx().c_bool(match op {
611                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
612                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
613                     _ => unreachable!()
614                 })
615             } else if is_float {
616                 bx.fcmp(
617                     base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
618                     lhs, rhs
619                 )
620             } else {
621                 bx.icmp(
622                     base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
623                     lhs, rhs
624                 )
625             }
626         }
627     }
628
629     pub fn codegen_fat_ptr_binop(
630         &mut self,
631         bx: &Builder<'a, 'll, 'tcx>,
632         op: mir::BinOp,
633         lhs_addr: &'ll Value,
634         lhs_extra: &'ll Value,
635         rhs_addr: &'ll Value,
636         rhs_extra: &'ll Value,
637         _input_ty: Ty<'tcx>,
638     ) -> &'ll Value {
639         match op {
640             mir::BinOp::Eq => {
641                 bx.and(
642                     bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
643                     bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
644                 )
645             }
646             mir::BinOp::Ne => {
647                 bx.or(
648                     bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
649                     bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
650                 )
651             }
652             mir::BinOp::Le | mir::BinOp::Lt |
653             mir::BinOp::Ge | mir::BinOp::Gt => {
654                 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
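                // E.g. (assumed illustration) for two `*const [u8]` values `a` and `b`,
                // `a <= b` is emitted as:
                //
                //     a.data < b.data || (a.data == b.data && a.len <= b.len)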
655                 let (op, strict_op) = match op {
656                     mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
657                     mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
658                     mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
659                     mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
660                     _ => bug!(),
661                 };
662
663                 bx.or(
664                     bx.icmp(strict_op, lhs_addr, rhs_addr),
665                     bx.and(
666                         bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
667                         bx.icmp(op, lhs_extra, rhs_extra)
668                     )
669                 )
670             }
671             _ => {
672                 bug!("unexpected fat ptr binop");
673             }
674         }
675     }
676
677     pub fn codegen_scalar_checked_binop(&mut self,
678                                       bx: &Builder<'a, 'll, 'tcx>,
679                                       op: mir::BinOp,
680                                       lhs: &'ll Value,
681                                       rhs: &'ll Value,
682                                       input_ty: Ty<'tcx>) -> OperandValue<&'ll Value> {
683         // This case can currently arise only from functions marked
684         // with #[rustc_inherit_overflow_checks] and inlined from
685         // another crate (mostly core::num generic/#[inline] fns),
686         // while the current crate doesn't use overflow checks.
687         if !bx.cx().check_overflow {
688             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
689             return OperandValue::Pair(val, bx.cx().c_bool(false));
690         }
691
692         let (val, of) = match op {
693             // These are checked using intrinsics
694             mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
695                 let oop = match op {
696                     mir::BinOp::Add => OverflowOp::Add,
697                     mir::BinOp::Sub => OverflowOp::Sub,
698                     mir::BinOp::Mul => OverflowOp::Mul,
699                     _ => unreachable!()
700                 };
701                 let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
702                 let res = bx.call(intrinsic, &[lhs, rhs], None);
703
704                 (bx.extract_value(res, 0),
705                  bx.extract_value(res, 1))
706             }
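            // A sketch of the observable behaviour (assumed example): with overflow checks
            // enabled, `200u8 + 100` goes through llvm.uadd.with.overflow.i8 and produces
            // the pair (44, true), which the caller then turns into a panic.
            //
            //     let (val, of) = 200u8.overflowing_add(100);
            //     assert_eq!((val, of), (44, true));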
707             mir::BinOp::Shl | mir::BinOp::Shr => {
708                 let lhs_llty = bx.cx().val_ty(lhs);
709                 let rhs_llty = bx.cx().val_ty(rhs);
710                 let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
711                 let outer_bits = bx.and(rhs, invert_mask);
712
713                 let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().c_null(rhs_llty));
714                 let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
715
716                 (val, of)
717             }
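            // Similarly for shifts (assumed example): with overflow checks on, `1u32 << 35`
            // masks the shift amount to 35 & 31 == 3 and flags the out-of-range amount:
            //
            //     let (val, of) = 1u32.overflowing_shl(35);
            //     assert_eq!((val, of), (8, true));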
718             _ => {
719                 bug!("Operator `{:?}` is not a checkable operator", op)
720             }
721         };
722
723         OperandValue::Pair(val, of)
724     }
725
726     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
727         match *rvalue {
728             mir::Rvalue::Ref(..) |
729             mir::Rvalue::Len(..) |
730             mir::Rvalue::Cast(..) | // (*)
731             mir::Rvalue::BinaryOp(..) |
732             mir::Rvalue::CheckedBinaryOp(..) |
733             mir::Rvalue::UnaryOp(..) |
734             mir::Rvalue::Discriminant(..) |
735             mir::Rvalue::NullaryOp(..) |
736             mir::Rvalue::Use(..) => // (*)
737                 true,
738             mir::Rvalue::Repeat(..) |
739             mir::Rvalue::Aggregate(..) => {
740                 let ty = rvalue.ty(self.mir, self.cx.tcx);
741                 let ty = self.monomorphize(&ty);
742                 self.cx.layout_of(ty).is_zst()
743             }
744         }
745
746         // (*) this is only true if the type is suitable
747     }
748 }
749
750 #[derive(Copy, Clone)]
751 enum OverflowOp {
752     Add, Sub, Mul
753 }
754
755 fn get_overflow_intrinsic(
756     oop: OverflowOp,
757     bx: &Builder<'_, 'll, '_>,
758     ty: Ty
759 ) -> &'ll Value {
760     use syntax::ast::IntTy::*;
761     use syntax::ast::UintTy::*;
762     use rustc::ty::{Int, Uint};
763
764     let tcx = bx.tcx();
765
766     let new_sty = match ty.sty {
767         Int(Isize) => Int(tcx.sess.target.isize_ty),
768         Uint(Usize) => Uint(tcx.sess.target.usize_ty),
769         ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
770         _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
771     };
772
773     let name = match oop {
774         OverflowOp::Add => match new_sty {
775             Int(I8) => "llvm.sadd.with.overflow.i8",
776             Int(I16) => "llvm.sadd.with.overflow.i16",
777             Int(I32) => "llvm.sadd.with.overflow.i32",
778             Int(I64) => "llvm.sadd.with.overflow.i64",
779             Int(I128) => "llvm.sadd.with.overflow.i128",
780
781             Uint(U8) => "llvm.uadd.with.overflow.i8",
782             Uint(U16) => "llvm.uadd.with.overflow.i16",
783             Uint(U32) => "llvm.uadd.with.overflow.i32",
784             Uint(U64) => "llvm.uadd.with.overflow.i64",
785             Uint(U128) => "llvm.uadd.with.overflow.i128",
786
787             _ => unreachable!(),
788         },
789         OverflowOp::Sub => match new_sty {
790             Int(I8) => "llvm.ssub.with.overflow.i8",
791             Int(I16) => "llvm.ssub.with.overflow.i16",
792             Int(I32) => "llvm.ssub.with.overflow.i32",
793             Int(I64) => "llvm.ssub.with.overflow.i64",
794             Int(I128) => "llvm.ssub.with.overflow.i128",
795
796             Uint(U8) => "llvm.usub.with.overflow.i8",
797             Uint(U16) => "llvm.usub.with.overflow.i16",
798             Uint(U32) => "llvm.usub.with.overflow.i32",
799             Uint(U64) => "llvm.usub.with.overflow.i64",
800             Uint(U128) => "llvm.usub.with.overflow.i128",
801
802             _ => unreachable!(),
803         },
804         OverflowOp::Mul => match new_sty {
805             Int(I8) => "llvm.smul.with.overflow.i8",
806             Int(I16) => "llvm.smul.with.overflow.i16",
807             Int(I32) => "llvm.smul.with.overflow.i32",
808             Int(I64) => "llvm.smul.with.overflow.i64",
809             Int(I128) => "llvm.smul.with.overflow.i128",
810
811             Uint(U8) => "llvm.umul.with.overflow.i8",
812             Uint(U16) => "llvm.umul.with.overflow.i16",
813             Uint(U32) => "llvm.umul.with.overflow.i32",
814             Uint(U64) => "llvm.umul.with.overflow.i64",
815             Uint(U128) => "llvm.umul.with.overflow.i128",
816
817             _ => unreachable!(),
818         },
819     };
820
821     bx.cx().get_intrinsic(&name)
822 }
823
824 fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
825                      signed: bool,
826                      x: &'ll Value,
827                      int_ty: &'ll Type,
828                      float_ty: &'ll Type) -> &'ll Value {
829     // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
830     // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
831     // LLVM's uitofp produces undef in that case, so we check for it manually.
832     let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
833     if is_u128_to_f32 {
834         // All inputs greater than or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
835         // and for everything else LLVM's uitofp works just fine.
836         use rustc_apfloat::ieee::Single;
837         use rustc_apfloat::Float;
838         const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
839                                             << (Single::MAX_EXP - Single::PRECISION as i16);
840         let max = bx.cx().c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
841         let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
842         let infinity_bits = bx.cx().c_u32(ieee::Single::INFINITY.to_bits() as u32);
843         let infinity = consts::bitcast(infinity_bits, float_ty);
844         bx.select(overflow, infinity, bx.uitofp(x, float_ty))
845     } else {
846         if signed {
847             bx.sitofp(x, float_ty)
848         } else {
849             bx.uitofp(x, float_ty)
850         }
851     }
852 }
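// A sketch of the effect of the u128 -> f32 special case above (assumed examples):
//
//     assert_eq!(u128::MAX as f32, std::f32::INFINITY); // would be undef without the check
//     assert_eq!(1u128 as f32, 1.0);                    // ordinary uitofp path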
853
854 fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
855                      signed: bool,
856                      x: &'ll Value,
857                      float_ty: &'ll Type,
858                      int_ty: &'ll Type) -> &'ll Value {
859     let fptosui_result = if signed {
860         bx.fptosi(x, int_ty)
861     } else {
862         bx.fptoui(x, int_ty)
863     };
864
865     if !bx.sess().opts.debugging_opts.saturating_float_casts {
866         return fptosui_result;
867     }
868     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
869     // destination integer type after rounding towards zero. This `undef` value can cause UB in
870     // safe code (see issue #10184), so we implement a saturating conversion on top of it:
871     // Semantically, the mathematical value of the input is rounded towards zero to the next
872     // mathematical integer, and then the result is clamped into the range of the destination
873     // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
874     // the destination integer type. NaN is mapped to 0.
875     //
876     // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
877     // a value representable in int_ty.
878     // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
879     // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
880     // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
881     // representable. Note that this only works if float_ty's exponent range is sufficiently large.
882     // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
883     // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
884     // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
885     // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
886     // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
887     fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: &Type) -> (u128, u128) {
888         let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
889         assert_eq!(rounded_min.status, Status::OK);
890         let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
891         assert!(rounded_max.value.is_finite());
892         (rounded_min.value.to_bits(), rounded_max.value.to_bits())
893     }
894     fn int_max(signed: bool, int_ty: &Type) -> u128 {
895         let shift_amount = 128 - int_ty.int_width();
896         if signed {
897             i128::MAX as u128 >> shift_amount
898         } else {
899             u128::MAX >> shift_amount
900         }
901     }
902     fn int_min(signed: bool, int_ty: &Type) -> i128 {
903         if signed {
904             i128::MIN >> (128 - int_ty.int_width())
905         } else {
906             0
907         }
908     }
909     let float_bits_to_llval = |bits| {
910         let bits_llval = match float_ty.float_width() {
911             32 => bx.cx().c_u32(bits as u32),
912             64 => bx.cx().c_u64(bits as u64),
913             n => bug!("unsupported float width {}", n),
914         };
915         consts::bitcast(bits_llval, float_ty)
916     };
917     let (f_min, f_max) = match float_ty.float_width() {
918         32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
919         64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
920         n => bug!("unsupported float width {}", n),
921     };
922     let f_min = float_bits_to_llval(f_min);
923     let f_max = float_bits_to_llval(f_max);
924     // To implement saturation, we perform the following steps:
925     //
926     // 1. Cast x to an integer with fpto[su]i. This may result in undef.
927     // 2. Compare x to f_min and f_max, and use the comparison results to select:
928     //  a) int_ty::MIN if x < f_min or x is NaN
929     //  b) int_ty::MAX if x > f_max
930     //  c) the result of fpto[su]i otherwise
931     // 3. If x is NaN, return 0, otherwise return the result of step 2.
932     //
933     // This avoids returning undef because values in the range [f_min, f_max] by definition fit into the
934     // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
935     // undef does not introduce any non-determinism either.
936     // More importantly, the above procedure correctly implements saturating conversion.
937     // Proof (sketch):
938     // If x is NaN, 0 is returned by definition.
939     // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
940     // This yields three cases to consider:
941     // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
942     //     saturating conversion for inputs in that range.
943     // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
944     //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
945     //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
946     //     is correct.
947     // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
948     //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
949     // QED.
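    //
    // A sketch of the resulting saturating semantics (assumed examples, with
    // -Z saturating-float-casts enabled):
    //
    //     assert_eq!(300.0f32 as u8, 255);    // clamped to int_ty::MAX
    //     assert_eq!(-1.0f32 as u8, 0);       // clamped to int_ty::MIN
    //     assert_eq!(std::f32::NAN as u8, 0); // NaN maps to 0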
950
951     // Step 1 was already performed above.
952
953     // Step 2: We use two comparisons and two selects, with %s1 being the result:
954     //     %less_or_nan = fcmp ult %x, %f_min
955     //     %greater = fcmp ogt %x, %f_max
956     //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
957     //     %s1 = select %greater, int_ty::MAX, %s0
958     // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
959     // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
960     // becomes int_ty::MIN if x is NaN.
961     // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
962     // negation, and the negation can be merged into the select. Therefore, it is not necessarily any
963     // more expensive than an ordered ("normal") comparison. Whether these optimizations will be
964     // performed is ultimately up to the backend, but at least x86 does perform them.
965     let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
966     let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
967     let int_max = bx.cx().c_uint_big(int_ty, int_max(signed, int_ty));
968     let int_min = bx.cx().c_uint_big(int_ty, int_min(signed, int_ty) as u128);
969     let s0 = bx.select(less_or_nan, int_min, fptosui_result);
970     let s1 = bx.select(greater, int_max, s0);
971
972     // Step 3: NaN replacement.
973     // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
974     // Therefore we only need to execute this step for signed integer types.
975     if signed {
976         // LLVM has no isNaN predicate, so we use (x == x) instead
977         bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().c_uint(int_ty, 0))
978     } else {
979         s1
980     }
981 }