1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use rustc::ty::{self, Ty};
12 use rustc::ty::cast::{CastTy, IntTy};
13 use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
14 use rustc::mir;
15 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
16 use rustc_apfloat::{ieee, Float, Status, Round};
17 use std::{u128, i128};
18
19 use base;
20 use MemFlags;
21 use callee;
22 use common::{self, RealPredicate, IntPredicate};
23 use rustc_mir::monomorphize;
24
25 use interfaces::*;
26
27 use super::{FunctionCx, LocalRef};
28 use super::operand::{OperandRef, OperandValue};
29 use super::place::PlaceRef;
30
31 impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
32     pub fn codegen_rvalue(
33         &mut self,
34         bx: Bx,
35         dest: PlaceRef<'tcx, Bx::Value>,
36         rvalue: &mir::Rvalue<'tcx>
37     ) -> Bx {
38         debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
39                dest.llval, rvalue);
40
41         match *rvalue {
42             mir::Rvalue::Use(ref operand) => {
43                 let cg_operand = self.codegen_operand(&bx, operand);
44                 // FIXME: consider not copying constants through the stack (fixable by codegenning
45                 // constants into OperandValue::Ref; if we don't do that yet, why not?)
46                 cg_operand.val.store(&bx, dest);
47                 bx
48             }
49
50             mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
51                 // The destination necessarily contains a fat pointer, so if
52                 // it's a scalar pair, it's a fat pointer or newtype thereof.
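                    // (e.g. unsizing `&[T; N]` to `&[T]`, or `&T` to `&dyn Trait`)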
53                 if bx.cx().is_backend_scalar_pair(dest.layout) {
54                     // into-coerce of a thin pointer to a fat pointer - just
55                     // use the operand path.
56                     let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
57                     temp.val.store(&bx, dest);
58                     return bx;
59                 }
60
61                 // Unsize of a nontrivial struct. I would prefer for
62                 // this to be eliminated by MIR building, but
63                 // `CoerceUnsized` can be passed by a where-clause,
64                 // so the (generic) MIR may not be able to expand it.
65                 let operand = self.codegen_operand(&bx, source);
66                 match operand.val {
67                     OperandValue::Pair(..) |
68                     OperandValue::Immediate(_) => {
69                         // unsize from an immediate structure. We don't
70                         // really need a temporary alloca here, but
71                         // avoiding it would require us to have
72                         // `coerce_unsized_into` use extractvalue to
73                         // index into the struct, and this case isn't
74                         // important enough for it.
75                         debug!("codegen_rvalue: creating ugly alloca");
76                         let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
77                         scratch.storage_live(&bx);
78                         operand.val.store(&bx, scratch);
79                         base::coerce_unsized_into(&bx, scratch, dest);
80                         scratch.storage_dead(&bx);
81                     }
82                     OperandValue::Ref(llref, None, align) => {
83                         let source = PlaceRef::new_sized(llref, operand.layout, align);
84                         base::coerce_unsized_into(&bx, source, dest);
85                     }
86                     OperandValue::Ref(_, Some(_), _) => {
87                         bug!("unsized coercion on an unsized rvalue")
88                     }
89                 }
90                 bx
91             }
92
93             mir::Rvalue::Repeat(ref elem, count) => {
94                 let cg_elem = self.codegen_operand(&bx, elem);
95
96                 // Do not generate the loop for zero-sized elements or empty arrays.
97                 if dest.layout.is_zst() {
98                     return bx;
99                 }
100
101                 let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
102
103                 if let OperandValue::Immediate(v) = cg_elem.val {
104                     let size = bx.cx().const_usize(dest.layout.size.bytes());
105
106                     // Use llvm.memset.p0i8.* to initialize all zero arrays
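                        // (e.g. `[0u8; N]` or `[0u64; N]` becomes a single zero-filling memset)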
107                     if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
108                         let fill = bx.cx().const_u8(0);
109                         bx.memset(start, fill, size, dest.align, MemFlags::empty());
110                         return bx;
111                     }
112
113                     // Use llvm.memset.p0i8.* to initialize byte arrays
114                     let v = base::from_immediate(&bx, v);
115                     if bx.cx().val_ty(v) == bx.cx().type_i8() {
116                         bx.memset(start, v, size, dest.align, MemFlags::empty());
117                         return bx;
118                     }
119                 }
120
121                 let count = bx.cx().const_usize(count);
122                 let end = dest.project_index(&bx, count).llval;
123
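                    // The generated code is a simple pointer-bumping loop:
                    //   header: %current = phi [ start, pred ], [ %next, body ]
                    //           br (%current != end), body, next
                    //   body:   store the element to *%current
                    //           %next = gep %current, 1; br header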
124                 let header_bx = bx.build_sibling_block("repeat_loop_header");
125                 let body_bx = bx.build_sibling_block("repeat_loop_body");
126                 let next_bx = bx.build_sibling_block("repeat_loop_next");
127
128                 bx.br(header_bx.llbb());
129                 let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]);
130
131                 let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
132                 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
133
134                 cg_elem.val.store(&body_bx,
135                     PlaceRef::new_sized(current, cg_elem.layout, dest.align));
136
137                 let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
138                 body_bx.br(header_bx.llbb());
139                 header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
140
141                 next_bx
142             }
143
144             mir::Rvalue::Aggregate(ref kind, ref operands) => {
145                 let (dest, active_field_index) = match **kind {
146                     mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
147                         dest.codegen_set_discr(&bx, variant_index);
148                         if adt_def.is_enum() {
149                             (dest.project_downcast(&bx, variant_index), active_field_index)
150                         } else {
151                             (dest, active_field_index)
152                         }
153                     }
154                     _ => (dest, None)
155                 };
156                 for (i, operand) in operands.iter().enumerate() {
157                     let op = self.codegen_operand(&bx, operand);
158                     // Do not generate stores and GEPs for zero-sized fields.
159                     if !op.layout.is_zst() {
160                         let field_index = active_field_index.unwrap_or(i);
161                         op.val.store(&bx, dest.project_field(&bx, field_index));
162                     }
163                 }
164                 bx
165             }
166
167             _ => {
168                 assert!(self.rvalue_creates_operand(rvalue));
169                 let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
170                 temp.val.store(&bx, dest);
171                 bx
172             }
173         }
174     }
175
176     pub fn codegen_rvalue_unsized(
177         &mut self,
178         bx: Bx,
179         indirect_dest: PlaceRef<'tcx, Bx::Value>,
180         rvalue: &mir::Rvalue<'tcx>,
181     ) -> Bx {
182         debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
183                indirect_dest.llval, rvalue);
184
185         match *rvalue {
186             mir::Rvalue::Use(ref operand) => {
187                 let cg_operand = self.codegen_operand(&bx, operand);
188                 cg_operand.val.store_unsized(&bx, indirect_dest);
189                 bx
190             }
191
192             _ => bug!("unsized assignment other than Rvalue::Use"),
193         }
194     }
195
196     pub fn codegen_rvalue_operand(
197         &mut self,
198         bx: Bx,
199         rvalue: &mir::Rvalue<'tcx>
200     ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
201         assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
202
203         match *rvalue {
204             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
205                 let operand = self.codegen_operand(&bx, source);
206                 debug!("cast operand is {:?}", operand);
207                 let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
208
209                 let val = match *kind {
210                     mir::CastKind::ReifyFnPointer => {
211                         match operand.layout.ty.sty {
212                             ty::FnDef(def_id, substs) => {
213                                 if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") {
214                                     bug!("reifying a fn ptr that requires \
215                                           const arguments");
216                                 }
217                                 OperandValue::Immediate(
218                                     callee::resolve_and_get_fn(bx.cx(), def_id, substs))
219                             }
220                             _ => {
221                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
222                             }
223                         }
224                     }
225                     mir::CastKind::ClosureFnPointer => {
226                         match operand.layout.ty.sty {
227                             ty::Closure(def_id, substs) => {
228                                 let instance = monomorphize::resolve_closure(
229                                     bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce);
230                                 OperandValue::Immediate(bx.cx().get_fn(instance))
231                             }
232                             _ => {
233                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
234                             }
235                         }
236                     }
237                     mir::CastKind::UnsafeFnPointer => {
238                         // this is a no-op at the LLVM level
239                         operand.val
240                     }
241                     mir::CastKind::Unsize => {
242                         assert!(bx.cx().is_backend_scalar_pair(cast));
243                         match operand.val {
244                             OperandValue::Pair(lldata, llextra) => {
245                                 // unsize from a fat pointer - this is a
246                                 // "trait-object-to-supertrait" coercion, for
247                                 // example,
248                                 //   &'a fmt::Debug+Send => &'a fmt::Debug,
249
250                                 // HACK(eddyb) have to bitcast pointers
251                                 // until LLVM removes pointee types.
252                                 let lldata = bx.pointercast(lldata,
253                                     bx.cx().scalar_pair_element_backend_type(cast, 0, true));
254                                 OperandValue::Pair(lldata, llextra)
255                             }
256                             OperandValue::Immediate(lldata) => {
257                                 // "standard" unsize
258                                 let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
259                                     operand.layout.ty, cast.ty);
260                                 OperandValue::Pair(lldata, llextra)
261                             }
262                             OperandValue::Ref(..) => {
263                                 bug!("by-ref operand {:?} in codegen_rvalue_operand",
264                                      operand);
265                             }
266                         }
267                     }
268                     mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => {
269                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
270                             if bx.cx().is_backend_scalar_pair(cast) {
271                                 let data_cast = bx.pointercast(data_ptr,
272                                     bx.cx().scalar_pair_element_backend_type(cast, 0, true));
273                                 OperandValue::Pair(data_cast, meta)
274                             } else { // cast to thin-ptr
275                                 // A fat-ptr to thin-ptr cast extracts the data pointer and
276                                 // pointer-casts it to the desired pointer type.
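                                    // (e.g. `*const [u8] as *const u8` keeps only the data pointer)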
277                                 let llcast_ty = bx.cx().immediate_backend_type(cast);
278                                 let llval = bx.pointercast(data_ptr, llcast_ty);
279                                 OperandValue::Immediate(llval)
280                             }
281                         } else {
282                             bug!("Unexpected non-Pair operand")
283                         }
284                     }
285                     mir::CastKind::Misc => {
286                         assert!(bx.cx().is_backend_immediate(cast));
287                         let ll_t_out = bx.cx().immediate_backend_type(cast);
288                         if operand.layout.abi.is_uninhabited() {
289                             let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
290                             return (bx, OperandRef {
291                                 val,
292                                 layout: cast,
293                             });
294                         }
295                         let r_t_in = CastTy::from_ty(operand.layout.ty)
296                             .expect("bad input type for cast");
297                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
298                         let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
299                         match operand.layout.variants {
300                             layout::Variants::Single { index } => {
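                                    // A single-variant ADT has a statically known discriminant,
                                    // so the cast folds directly to that constant value.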
301                                 if let Some(def) = operand.layout.ty.ty_adt_def() {
302                                     let discr_val = def
303                                         .discriminant_for_variant(bx.cx().tcx(), index)
304                                         .val;
305                                     let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
306                                     return (bx, OperandRef {
307                                         val: OperandValue::Immediate(discr),
308                                         layout: cast,
309                                     });
310                                 }
311                             }
312                             layout::Variants::Tagged { .. } |
313                             layout::Variants::NicheFilling { .. } => {},
314                         }
315                         let llval = operand.immediate();
316
317                         let mut signed = false;
318                         if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
319                             if let layout::Int(_, s) = scalar.value {
320                                 // We use `i1` for bytes that are always `0` or `1`,
321                                 // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
322                                 // let LLVM interpret the `i1` as signed, because
323                                 // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
324                                 signed = !scalar.is_bool() && s;
325
326                                 let er = scalar.valid_range_exclusive(bx.cx());
327                                 if er.end != er.start &&
328                                    scalar.valid_range.end() > scalar.valid_range.start() {
329                                     // We want `table[e as usize]` to not
330                                     // have bound checks, and this is the most
331                                     // convenient place to put the `assume`.
332
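                                        // The `assume` asserts that the value does not exceed the
                                        // scalar's valid upper bound, which is what lets LLVM drop
                                        // the bounds check on such indexing.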
333                                     base::call_assume(&bx, bx.icmp(
334                                         IntPredicate::IntULE,
335                                         llval,
336                                         bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
337                                     ));
338                                 }
339                             }
340                         }
341
342                         let newval = match (r_t_in, r_t_out) {
343                             (CastTy::Int(_), CastTy::Int(_)) => {
344                                 bx.intcast(llval, ll_t_out, signed)
345                             }
346                             (CastTy::Float, CastTy::Float) => {
347                                 let srcsz = bx.cx().float_width(ll_t_in);
348                                 let dstsz = bx.cx().float_width(ll_t_out);
349                                 if dstsz > srcsz {
350                                     bx.fpext(llval, ll_t_out)
351                                 } else if srcsz > dstsz {
352                                     bx.fptrunc(llval, ll_t_out)
353                                 } else {
354                                     llval
355                                 }
356                             }
357                             (CastTy::Ptr(_), CastTy::Ptr(_)) |
358                             (CastTy::FnPtr, CastTy::Ptr(_)) |
359                             (CastTy::RPtr(_), CastTy::Ptr(_)) =>
360                                 bx.pointercast(llval, ll_t_out),
361                             (CastTy::Ptr(_), CastTy::Int(_)) |
362                             (CastTy::FnPtr, CastTy::Int(_)) =>
363                                 bx.ptrtoint(llval, ll_t_out),
364                             (CastTy::Int(_), CastTy::Ptr(_)) => {
365                                 let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
366                                 bx.inttoptr(usize_llval, ll_t_out)
367                             }
368                             (CastTy::Int(_), CastTy::Float) =>
369                                 cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
370                             (CastTy::Float, CastTy::Int(IntTy::I)) =>
371                                 cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
372                             (CastTy::Float, CastTy::Int(_)) =>
373                                 cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
374                             _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
375                         };
376                         OperandValue::Immediate(newval)
377                     }
378                 };
379                 (bx, OperandRef {
380                     val,
381                     layout: cast
382                 })
383             }
384
385             mir::Rvalue::Ref(_, bk, ref place) => {
386                 let cg_place = self.codegen_place(&bx, place);
387
388                 let ty = cg_place.layout.ty;
389
390                 // Note: places are indirect, so storing the `llval` into the
391                 // destination effectively creates a reference.
392                 let val = if !bx.cx().type_has_metadata(ty) {
393                     OperandValue::Immediate(cg_place.llval)
394                 } else {
395                     OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
396                 };
397                 (bx, OperandRef {
398                     val,
399                     layout: self.cx.layout_of(self.cx.tcx().mk_ref(
400                         self.cx.tcx().types.re_erased,
401                         ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
402                     )),
403                 })
404             }
405
406             mir::Rvalue::Len(ref place) => {
407                 let size = self.evaluate_array_len(&bx, place);
408                 let operand = OperandRef {
409                     val: OperandValue::Immediate(size),
410                     layout: bx.cx().layout_of(bx.tcx().types.usize),
411                 };
412                 (bx, operand)
413             }
414
415             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
416                 let lhs = self.codegen_operand(&bx, lhs);
417                 let rhs = self.codegen_operand(&bx, rhs);
418                 let llresult = match (lhs.val, rhs.val) {
419                     (OperandValue::Pair(lhs_addr, lhs_extra),
420                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
421                         self.codegen_fat_ptr_binop(&bx, op,
422                                                  lhs_addr, lhs_extra,
423                                                  rhs_addr, rhs_extra,
424                                                  lhs.layout.ty)
425                     }
426
427                     (OperandValue::Immediate(lhs_val),
428                      OperandValue::Immediate(rhs_val)) => {
429                         self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
430                     }
431
432                     _ => bug!()
433                 };
434                 let operand = OperandRef {
435                     val: OperandValue::Immediate(llresult),
436                     layout: bx.cx().layout_of(
437                         op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
438                 };
439                 (bx, operand)
440             }
441             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
442                 let lhs = self.codegen_operand(&bx, lhs);
443                 let rhs = self.codegen_operand(&bx, rhs);
444                 let result = self.codegen_scalar_checked_binop(&bx, op,
445                                                              lhs.immediate(), rhs.immediate(),
446                                                              lhs.layout.ty);
447                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
448                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
449                 let operand = OperandRef {
450                     val: result,
451                     layout: bx.cx().layout_of(operand_ty)
452                 };
453
454                 (bx, operand)
455             }
456
457             mir::Rvalue::UnaryOp(op, ref operand) => {
458                 let operand = self.codegen_operand(&bx, operand);
459                 let lloperand = operand.immediate();
460                 let is_float = operand.layout.ty.is_fp();
461                 let llval = match op {
462                     mir::UnOp::Not => bx.not(lloperand),
463                     mir::UnOp::Neg => if is_float {
464                         bx.fneg(lloperand)
465                     } else {
466                         bx.neg(lloperand)
467                     }
468                 };
469                 (bx, OperandRef {
470                     val: OperandValue::Immediate(llval),
471                     layout: operand.layout,
472                 })
473             }
474
475             mir::Rvalue::Discriminant(ref place) => {
476                 let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
477                 let discr = self.codegen_place(&bx, place)
478                     .codegen_get_discr(&bx, discr_ty);
479                 (bx, OperandRef {
480                     val: OperandValue::Immediate(discr),
481                     layout: self.cx.layout_of(discr_ty)
482                 })
483             }
484
485             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
486                 assert!(bx.cx().type_is_sized(ty));
487                 let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
488                 let tcx = self.cx.tcx();
489                 (bx, OperandRef {
490                     val: OperandValue::Immediate(val),
491                     layout: self.cx.layout_of(tcx.types.usize),
492                 })
493             }
494
495             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
496                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
497                 let (size, align) = bx.cx().layout_of(content_ty).size_and_align();
498                 let llsize = bx.cx().const_usize(size.bytes());
499                 let llalign = bx.cx().const_usize(align.abi());
500                 let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
501                 let llty_ptr = bx.cx().backend_type(box_layout);
502
503                 // Allocate space:
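                    // `box EXPR` allocates through the `exchange_malloc` lang item, passing the
                    // value's size and align, then casts the returned pointer to the box's type.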
504                 let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
505                     Ok(id) => id,
506                     Err(s) => {
507                         bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
508                     }
509                 };
510                 let instance = ty::Instance::mono(bx.tcx(), def_id);
511                 let r = bx.cx().get_fn(instance);
512                 let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
513
514                 let operand = OperandRef {
515                     val: OperandValue::Immediate(val),
516                     layout: box_layout,
517                 };
518                 (bx, operand)
519             }
520             mir::Rvalue::Use(ref operand) => {
521                 let operand = self.codegen_operand(&bx, operand);
522                 (bx, operand)
523             }
524             mir::Rvalue::Repeat(..) |
525             mir::Rvalue::Aggregate(..) => {
526                 // According to `rvalue_creates_operand`, only ZST
527                 // aggregate rvalues are allowed to be operands.
528                 let ty = rvalue.ty(self.mir, self.cx.tcx());
529                 (bx, OperandRef::new_zst(self.cx,
530                     self.cx.layout_of(self.monomorphize(&ty))))
531             }
532         }
533     }
534
535     fn evaluate_array_len(
536         &mut self,
537         bx: &Bx,
538         place: &mir::Place<'tcx>,
539     ) -> Bx::Value {
540         // ZSTs are passed as operands and require special handling
541         // because codegen_place() panics if a Local is an operand.
542         if let mir::Place::Local(index) = *place {
543             if let LocalRef::Operand(Some(op)) = self.locals[index] {
544                 if let ty::Array(_, n) = op.layout.ty.sty {
545                     let n = n.unwrap_usize(bx.cx().tcx());
546                     return bx.cx().const_usize(n);
547                 }
548             }
549         }
550         // use the common size calculation for non-zero-sized types
551         let cg_value = self.codegen_place(bx, place);
552         return cg_value.len(bx.cx());
553     }
554
555     pub fn codegen_scalar_binop(
556         &mut self,
557         bx: &Bx,
558         op: mir::BinOp,
559         lhs: Bx::Value,
560         rhs: Bx::Value,
561         input_ty: Ty<'tcx>,
562     ) -> Bx::Value {
563         let is_float = input_ty.is_fp();
564         let is_signed = input_ty.is_signed();
565         let is_unit = input_ty.is_unit();
566         match op {
567             mir::BinOp::Add => if is_float {
568                 bx.fadd(lhs, rhs)
569             } else {
570                 bx.add(lhs, rhs)
571             },
572             mir::BinOp::Sub => if is_float {
573                 bx.fsub(lhs, rhs)
574             } else {
575                 bx.sub(lhs, rhs)
576             },
577             mir::BinOp::Mul => if is_float {
578                 bx.fmul(lhs, rhs)
579             } else {
580                 bx.mul(lhs, rhs)
581             },
582             mir::BinOp::Div => if is_float {
583                 bx.fdiv(lhs, rhs)
584             } else if is_signed {
585                 bx.sdiv(lhs, rhs)
586             } else {
587                 bx.udiv(lhs, rhs)
588             },
589             mir::BinOp::Rem => if is_float {
590                 bx.frem(lhs, rhs)
591             } else if is_signed {
592                 bx.srem(lhs, rhs)
593             } else {
594                 bx.urem(lhs, rhs)
595             },
596             mir::BinOp::BitOr => bx.or(lhs, rhs),
597             mir::BinOp::BitAnd => bx.and(lhs, rhs),
598             mir::BinOp::BitXor => bx.xor(lhs, rhs),
599             mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
600             mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
601             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
602             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
603             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
604                 bx.cx().const_bool(match op {
605                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
606                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
607                     _ => unreachable!()
608                 })
609             } else if is_float {
610                 bx.fcmp(
611                     base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
612                     lhs, rhs
613                 )
614             } else {
615                 bx.icmp(
616                     base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
617                     lhs, rhs
618                 )
619             }
620         }
621     }
622
623     pub fn codegen_fat_ptr_binop(
624         &mut self,
625         bx: &Bx,
626         op: mir::BinOp,
627         lhs_addr: Bx::Value,
628         lhs_extra: Bx::Value,
629         rhs_addr: Bx::Value,
630         rhs_extra: Bx::Value,
631         _input_ty: Ty<'tcx>,
632     ) -> Bx::Value {
633         match op {
634             mir::BinOp::Eq => {
635                 bx.and(
636                     bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
637                     bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
638                 )
639             }
640             mir::BinOp::Ne => {
641                 bx.or(
642                     bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
643                     bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
644                 )
645             }
646             mir::BinOp::Le | mir::BinOp::Lt |
647             mir::BinOp::Ge | mir::BinOp::Gt => {
648                 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
649                 let (op, strict_op) = match op {
650                     mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
651                     mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
652                     mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
653                     mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
654                     _ => bug!(),
655                 };
656
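                    // e.g. `a <= b` is `a.0 < b.0 || (a.0 == b.0 && a.1 <= b.1)`,
                    // hence the strict predicate on the address part for `Le`/`Ge`.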
657                 bx.or(
658                     bx.icmp(strict_op, lhs_addr, rhs_addr),
659                     bx.and(
660                         bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
661                         bx.icmp(op, lhs_extra, rhs_extra)
662                     )
663                 )
664             }
665             _ => {
666                 bug!("unexpected fat ptr binop");
667             }
668         }
669     }
670
671     pub fn codegen_scalar_checked_binop(
672         &mut self,
673         bx: &Bx,
674         op: mir::BinOp,
675         lhs: Bx::Value,
676         rhs: Bx::Value,
677         input_ty: Ty<'tcx>
678     ) -> OperandValue<Bx::Value> {
679         // This case can currently arise only from functions marked
680         // with #[rustc_inherit_overflow_checks] and inlined from
681         // another crate (mostly core::num generic/#[inline] fns),
682         // while the current crate doesn't use overflow checks.
683         if !bx.cx().check_overflow() {
684             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
685             return OperandValue::Pair(val, bx.cx().const_bool(false));
686         }
687
688         let (val, of) = match op {
689             // These are checked using intrinsics
690             mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
691                 let oop = match op {
692                     mir::BinOp::Add => OverflowOp::Add,
693                     mir::BinOp::Sub => OverflowOp::Sub,
694                     mir::BinOp::Mul => OverflowOp::Mul,
695                     _ => unreachable!()
696                 };
697                 let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
698                 let res = bx.call(intrinsic, &[lhs, rhs], None);
699
700                 (bx.extract_value(res, 0),
701                  bx.extract_value(res, 1))
702             }
703             mir::BinOp::Shl | mir::BinOp::Shr => {
704                 let lhs_llty = bx.cx().val_ty(lhs);
705                 let rhs_llty = bx.cx().val_ty(rhs);
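                    // Shifting by at least the bit width of `lhs` overflows. Widths are powers of
                    // two, so that is exactly when `rhs` has a bit set outside the valid
                    // shift-amount mask; test those masked-off "outer bits" against zero.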
706                 let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
707                 let outer_bits = bx.and(rhs, invert_mask);
708
709                 let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
710                 let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
711
712                 (val, of)
713             }
714             _ => {
715                 bug!("Operator `{:?}` is not a checkable operator", op)
716             }
717         };
718
719         OperandValue::Pair(val, of)
720     }
721 }
722
723 impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
724     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
725         match *rvalue {
726             mir::Rvalue::Ref(..) |
727             mir::Rvalue::Len(..) |
728             mir::Rvalue::Cast(..) | // (*)
729             mir::Rvalue::BinaryOp(..) |
730             mir::Rvalue::CheckedBinaryOp(..) |
731             mir::Rvalue::UnaryOp(..) |
732             mir::Rvalue::Discriminant(..) |
733             mir::Rvalue::NullaryOp(..) |
734             mir::Rvalue::Use(..) => // (*)
735                 true,
736             mir::Rvalue::Repeat(..) |
737             mir::Rvalue::Aggregate(..) => {
738                 let ty = rvalue.ty(self.mir, self.cx.tcx());
739                 let ty = self.monomorphize(&ty);
740                 self.cx.layout_of(ty).is_zst()
741             }
742         }
743
744         // (*) this is only true if the type is suitable
745     }
746 }
747
748 #[derive(Copy, Clone)]
749 enum OverflowOp {
750     Add, Sub, Mul
751 }
752
753 fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
754     oop: OverflowOp,
755     bx: &Bx,
756     ty: Ty
757 ) -> Bx::Value {
758     use syntax::ast::IntTy::*;
759     use syntax::ast::UintTy::*;
760     use rustc::ty::{Int, Uint};
761
762     let tcx = bx.tcx();
763
764     let new_sty = match ty.sty {
765         Int(Isize) => Int(tcx.sess.target.isize_ty),
766         Uint(Usize) => Uint(tcx.sess.target.usize_ty),
767         ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
768         _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
769     };
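        // e.g. on a 64-bit target, `isize`/`usize` normalize to `i64`/`u64`, so the
        // `*.with.overflow.i64` intrinsics are selected below.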
770
771     let name = match oop {
772         OverflowOp::Add => match new_sty {
773             Int(I8) => "llvm.sadd.with.overflow.i8",
774             Int(I16) => "llvm.sadd.with.overflow.i16",
775             Int(I32) => "llvm.sadd.with.overflow.i32",
776             Int(I64) => "llvm.sadd.with.overflow.i64",
777             Int(I128) => "llvm.sadd.with.overflow.i128",
778
779             Uint(U8) => "llvm.uadd.with.overflow.i8",
780             Uint(U16) => "llvm.uadd.with.overflow.i16",
781             Uint(U32) => "llvm.uadd.with.overflow.i32",
782             Uint(U64) => "llvm.uadd.with.overflow.i64",
783             Uint(U128) => "llvm.uadd.with.overflow.i128",
784
785             _ => unreachable!(),
786         },
787         OverflowOp::Sub => match new_sty {
788             Int(I8) => "llvm.ssub.with.overflow.i8",
789             Int(I16) => "llvm.ssub.with.overflow.i16",
790             Int(I32) => "llvm.ssub.with.overflow.i32",
791             Int(I64) => "llvm.ssub.with.overflow.i64",
792             Int(I128) => "llvm.ssub.with.overflow.i128",
793
794             Uint(U8) => "llvm.usub.with.overflow.i8",
795             Uint(U16) => "llvm.usub.with.overflow.i16",
796             Uint(U32) => "llvm.usub.with.overflow.i32",
797             Uint(U64) => "llvm.usub.with.overflow.i64",
798             Uint(U128) => "llvm.usub.with.overflow.i128",
799
800             _ => unreachable!(),
801         },
802         OverflowOp::Mul => match new_sty {
803             Int(I8) => "llvm.smul.with.overflow.i8",
804             Int(I16) => "llvm.smul.with.overflow.i16",
805             Int(I32) => "llvm.smul.with.overflow.i32",
806             Int(I64) => "llvm.smul.with.overflow.i64",
807             Int(I128) => "llvm.smul.with.overflow.i128",
808
809             Uint(U8) => "llvm.umul.with.overflow.i8",
810             Uint(U16) => "llvm.umul.with.overflow.i16",
811             Uint(U32) => "llvm.umul.with.overflow.i32",
812             Uint(U64) => "llvm.umul.with.overflow.i64",
813             Uint(U128) => "llvm.umul.with.overflow.i128",
814
815             _ => unreachable!(),
816         },
817     };
818
819     bx.cx().get_intrinsic(&name)
820 }
821
822 fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
823     bx: &Bx,
824     signed: bool,
825     x: Bx::Value,
826     int_ty: Bx::Type,
827     float_ty: Bx::Type
828 ) -> Bx::Value {
829     // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
830     // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
831     // LLVM's uitofp produces undef in those cases, so we manually check for that case.
832     let is_u128_to_f32 = !signed &&
833         bx.cx().int_width(int_ty) == 128 &&
834         bx.cx().float_width(float_ty) == 32;
835     if is_u128_to_f32 {
836         // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
837         // and for everything else LLVM's uitofp works just fine.
838         use rustc_apfloat::ieee::Single;
839         use rustc_apfloat::Float;
840         const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
841                                             << (Single::MAX_EXP - Single::PRECISION as i16);
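            // = (2^25 - 1) * 2^103, i.e. f32::MAX ((2^24 - 1) * 2^104) plus half of its ULP (2^103).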
842         let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
843         let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
844         let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
845         let infinity = bx.bitcast(infinity_bits, float_ty);
846         bx.select(overflow, infinity, bx.uitofp(x, float_ty))
847     } else {
848         if signed {
849             bx.sitofp(x, float_ty)
850         } else {
851             bx.uitofp(x, float_ty)
852         }
853     }
854 }
855
856 fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
857     bx: &Bx,
858     signed: bool,
859     x: Bx::Value,
860     float_ty: Bx::Type,
861     int_ty: Bx::Type
862 ) -> Bx::Value {
863     let fptosui_result = if signed {
864         bx.fptosi(x, int_ty)
865     } else {
866         bx.fptoui(x, int_ty)
867     };
868
869     if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
870         return fptosui_result;
871     }
872     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
873     // destination integer type after rounding towards zero. This `undef` value can cause UB in
874     // safe code (see issue #10184), so we implement a saturating conversion on top of it:
875     // Semantically, the mathematical value of the input is rounded towards zero to the next
876     // mathematical integer, and then the result is clamped into the range of the destination
877     // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
878     // the destination integer type. NaN is mapped to 0.
879     //
880     // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
881     // a value representable in int_ty.
882     // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
883     // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
884     // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
885     // representable. Note that this only works if float_ty's exponent range is sufficiently large.
886     // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
887     // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
888     // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
889     // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
890     // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
891     let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
892         let shift_amount = 128 - bx.cx().int_width(int_ty);
893         if signed {
894             i128::MAX as u128 >> shift_amount
895         } else {
896             u128::MAX >> shift_amount
897         }
898     };
899     let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
900         if signed {
901             i128::MIN >> (128 - bx.cx().int_width(int_ty))
902         } else {
903             0
904         }
905     };
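        // e.g. for i8 these yield 127 and -128; for u8, 255 and 0.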
906
907     let compute_clamp_bounds_single =
908     |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
909         let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
910         assert_eq!(rounded_min.status, Status::OK);
911         let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
912         assert!(rounded_max.value.is_finite());
913         (rounded_min.value.to_bits(), rounded_max.value.to_bits())
914     };
915     let compute_clamp_bounds_double =
916     |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
917         let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
918         assert_eq!(rounded_min.status, Status::OK);
919         let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
920         assert!(rounded_max.value.is_finite());
921         (rounded_min.value.to_bits(), rounded_max.value.to_bits())
922     };
923
924     let float_bits_to_llval = |bits| {
925         let bits_llval = match bx.cx().float_width(float_ty) {
926             32 => bx.cx().const_u32(bits as u32),
927             64 => bx.cx().const_u64(bits as u64),
928             n => bug!("unsupported float width {}", n),
929         };
930         bx.bitcast(bits_llval, float_ty)
931     };
932     let (f_min, f_max) = match bx.cx().float_width(float_ty) {
933         32 => compute_clamp_bounds_single(signed, int_ty),
934         64 => compute_clamp_bounds_double(signed, int_ty),
935         n => bug!("unsupported float width {}", n),
936     };
937     let f_min = float_bits_to_llval(f_min);
938     let f_max = float_bits_to_llval(f_max);
939     // To implement saturation, we perform the following steps:
940     //
941     // 1. Cast x to an integer with fpto[su]i. This may result in undef.
942     // 2. Compare x to f_min and f_max, and use the comparison results to select:
943     //  a) int_ty::MIN if x < f_min or x is NaN
944     //  b) int_ty::MAX if x > f_max
945     //  c) the result of fpto[su]i otherwise
946     // 3. If x is NaN, return 0, otherwise return the result of step 2.
947     //
948     // This avoids an undef result, because values in the range [f_min, f_max] by definition fit
949     // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
950     // Our use of undef does not introduce any non-determinism either.
951     // More importantly, the above procedure correctly implements saturating conversion.
952     // Proof (sketch):
953     // If x is NaN, 0 is returned by definition.
954     // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
955     // This yields three cases to consider:
956     // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
957     //     saturating conversion for inputs in that range.
958     // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
959     //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
960     //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
961     //     is correct.
962     // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
963     //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
964     // QED.
965
966     // Step 1 was already performed above.
967
968     // Step 2: We use two comparisons and two selects, with %s1 being the result:
969     //     %less_or_nan = fcmp ult %x, %f_min
970     //     %greater = fcmp olt %x, %f_max
971     //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
972     //     %s1 = select %greater, int_ty::MAX, %s0
973     // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
974     // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
975     // becomes int_ty::MIN if x is NaN.
976     // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
977     // negation, and the negation can be merged into the select. Therefore, it is not necessarily
978     // any more expensive than an ordered ("normal") comparison. Whether these optimizations will be
979     // performed is ultimately up to the backend, but at least x86 does perform them.
980     let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
981     let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
982     let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
983     let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
984     let s0 = bx.select(less_or_nan, int_min, fptosui_result);
985     let s1 = bx.select(greater, int_max, s0);
986
987     // Step 3: NaN replacement.
988     // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
989     // Therefore we only need to execute this step for signed integer types.
990     if signed {
991         // LLVM has no isNaN predicate, so we use (x == x) instead
992         bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
993     } else {
994         s1
995     }
996 }