1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::{self, ValueRef};
12 use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind};
13 use rustc_const_math::ConstInt::*;
14 use rustc_const_math::{ConstInt, ConstMathErr, MAX_F32_PLUS_HALF_ULP};
15 use rustc::hir::def_id::DefId;
16 use rustc::infer::TransNormalize;
17 use rustc::traits;
18 use rustc::mir;
19 use rustc::mir::tcx::PlaceTy;
20 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
21 use rustc::ty::layout::{self, LayoutOf, Size};
22 use rustc::ty::cast::{CastTy, IntTy};
23 use rustc::ty::subst::{Kind, Substs};
24 use rustc_apfloat::{ieee, Float, Status};
25 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
26 use base;
27 use abi::{self, Abi};
28 use callee;
29 use builder::Builder;
30 use common::{self, CodegenCx, const_get_elt, val_ty};
31 use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64};
32 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
33 use common::const_to_opt_u128;
34 use consts;
35 use type_of::LayoutLlvmExt;
36 use type_::Type;
37 use value::Value;
38
39 use syntax_pos::Span;
40 use syntax::ast;
41
42 use std::fmt;
43 use std::ptr;
44
45 use super::operand::{OperandRef, OperandValue};
46 use super::FunctionCx;
47
/// A sized constant rvalue.
/// The LLVM type is not necessarily the same for all values of a single Rust
/// type, e.g. each enum variant would have its own LLVM struct type.
51 #[derive(Copy, Clone)]
52 pub struct Const<'tcx> {
53     pub llval: ValueRef,
54     pub ty: Ty<'tcx>
55 }
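// For instance, for a hypothetical `enum E { A(u8), B(u64) }`, the `llval` of a
// constant `E::A(0)` and of a constant `E::B(0)` can carry two different LLVM
// struct types even though `ty` is `E` in both cases; this is why `to_operand`
// below compares `val_ty(self.llval)` against the layout's LLVM type before
// deciding how to build the operand.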
56
57 impl<'a, 'tcx> Const<'tcx> {
58     pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> {
59         Const {
60             llval,
61             ty,
62         }
63     }
64
65     pub fn from_constint(cx: &CodegenCx<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> {
66         let tcx = cx.tcx;
67         let (llval, ty) = match *ci {
68             I8(v) => (C_int(Type::i8(cx), v as i64), tcx.types.i8),
69             I16(v) => (C_int(Type::i16(cx), v as i64), tcx.types.i16),
70             I32(v) => (C_int(Type::i32(cx), v as i64), tcx.types.i32),
71             I64(v) => (C_int(Type::i64(cx), v as i64), tcx.types.i64),
72             I128(v) => (C_uint_big(Type::i128(cx), v as u128), tcx.types.i128),
73             Isize(v) => (C_int(Type::isize(cx), v.as_i64()), tcx.types.isize),
74             U8(v) => (C_uint(Type::i8(cx), v as u64), tcx.types.u8),
75             U16(v) => (C_uint(Type::i16(cx), v as u64), tcx.types.u16),
76             U32(v) => (C_uint(Type::i32(cx), v as u64), tcx.types.u32),
77             U64(v) => (C_uint(Type::i64(cx), v), tcx.types.u64),
78             U128(v) => (C_uint_big(Type::i128(cx), v), tcx.types.u128),
79             Usize(v) => (C_uint(Type::isize(cx), v.as_u64()), tcx.types.usize),
80         };
        Const { llval, ty }
82     }
83
    /// Translate a `ConstVal` into an LLVM constant value.
85     pub fn from_constval(cx: &CodegenCx<'a, 'tcx>,
86                          cv: &ConstVal,
87                          ty: Ty<'tcx>)
88                          -> Const<'tcx> {
89         let llty = cx.layout_of(ty).llvm_type(cx);
90         let val = match *cv {
91             ConstVal::Float(v) => {
92                 let bits = match v.ty {
93                     ast::FloatTy::F32 => C_u32(cx, v.bits as u32),
94                     ast::FloatTy::F64 => C_u64(cx, v.bits as u64)
95                 };
96                 consts::bitcast(bits, llty)
97             }
98             ConstVal::Bool(v) => C_bool(cx, v),
99             ConstVal::Integral(ref i) => return Const::from_constint(cx, i),
100             ConstVal::Str(ref v) => C_str_slice(cx, v.clone()),
101             ConstVal::ByteStr(v) => {
102                 consts::addr_of(cx, C_bytes(cx, v.data), cx.align_of(ty), "byte_str")
103             }
104             ConstVal::Char(c) => C_uint(Type::char(cx), c as u64),
105             ConstVal::Function(..) => C_undef(llty),
106             ConstVal::Variant(_) |
107             ConstVal::Aggregate(..) |
108             ConstVal::Unevaluated(..) => {
109                 bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv)
110             }
111         };
112
113         assert!(!ty.has_erasable_regions());
114
115         Const::new(val, ty)
116     }
117
118     fn get_field(&self, cx: &CodegenCx<'a, 'tcx>, i: usize) -> ValueRef {
119         let layout = cx.layout_of(self.ty);
120         let field = layout.field(cx, i);
121         if field.is_zst() {
122             return C_undef(field.immediate_llvm_type(cx));
123         }
124         let offset = layout.fields.offset(i);
125         match layout.abi {
126             layout::Abi::Scalar(_) |
127             layout::Abi::ScalarPair(..) |
128             layout::Abi::Vector { .. }
129                 if offset.bytes() == 0 && field.size == layout.size => self.llval,
130
131             layout::Abi::ScalarPair(ref a, ref b) => {
132                 if offset.bytes() == 0 {
133                     assert_eq!(field.size, a.value.size(cx));
134                     const_get_elt(self.llval, 0)
135                 } else {
136                     assert_eq!(offset, a.value.size(cx)
137                         .abi_align(b.value.align(cx)));
138                     assert_eq!(field.size, b.value.size(cx));
139                     const_get_elt(self.llval, 1)
140                 }
141             }
142             _ => {
143                 match layout.fields {
144                     layout::FieldPlacement::Union(_) => self.llval,
145                     _ => const_get_elt(self.llval, layout.llvm_field_index(i)),
146                 }
147             }
148         }
149     }
150
151     fn get_pair(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
152         (self.get_field(cx, 0), self.get_field(cx, 1))
153     }
154
155     fn get_fat_ptr(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
156         assert_eq!(abi::FAT_PTR_ADDR, 0);
157         assert_eq!(abi::FAT_PTR_EXTRA, 1);
158         self.get_pair(cx)
159     }
160
161     fn as_place(&self) -> ConstPlace<'tcx> {
162         ConstPlace {
163             base: Base::Value(self.llval),
164             llextra: ptr::null_mut(),
165             ty: self.ty
166         }
167     }
168
169     pub fn to_operand(&self, cx: &CodegenCx<'a, 'tcx>) -> OperandRef<'tcx> {
170         let layout = cx.layout_of(self.ty);
171         let llty = layout.immediate_llvm_type(cx);
172         let llvalty = val_ty(self.llval);
173
174         let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
175             OperandValue::Pair(
176                 const_get_elt(self.llval, 0),
177                 const_get_elt(self.llval, 1))
178         } else if llty == llvalty && layout.is_llvm_immediate() {
179             // If the types match, we can use the value directly.
180             OperandValue::Immediate(self.llval)
181         } else {
182             // Otherwise, or if the value is not immediate, we create
183             // a constant LLVM global and cast its address if necessary.
184             let align = cx.align_of(self.ty);
185             let ptr = consts::addr_of(cx, self.llval, align, "const");
186             OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(cx).ptr_to()),
187                               layout.align)
188         };
189
190         OperandRef {
191             val,
192             layout
193         }
194     }
195 }
196
197 impl<'tcx> fmt::Debug for Const<'tcx> {
198     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
199         write!(f, "Const({:?}: {:?})", Value(self.llval), self.ty)
200     }
201 }
202
203 #[derive(Copy, Clone)]
204 enum Base {
    /// A constant value without a unique address.
206     Value(ValueRef),
207
208     /// String literal base pointer (cast from array).
209     Str(ValueRef),
210
211     /// The address of a static.
212     Static(ValueRef)
213 }
214
215 /// A place as seen from a constant.
216 #[derive(Copy, Clone)]
217 struct ConstPlace<'tcx> {
218     base: Base,
219     llextra: ValueRef,
220     ty: Ty<'tcx>
221 }
222
223 impl<'tcx> ConstPlace<'tcx> {
224     fn to_const(&self, span: Span) -> Const<'tcx> {
225         match self.base {
226             Base::Value(val) => Const::new(val, self.ty),
227             Base::Str(ptr) => {
228                 span_bug!(span, "loading from `str` ({:?}) in constant",
229                           Value(ptr))
230             }
231             Base::Static(val) => {
232                 span_bug!(span, "loading from `static` ({:?}) in constant",
233                           Value(val))
234             }
235         }
236     }
237
238     pub fn len<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> ValueRef {
239         match self.ty.sty {
240             ty::TyArray(_, n) => {
241                 C_usize(cx, n.val.to_const_int().unwrap().to_u64().unwrap())
242             }
243             ty::TySlice(_) | ty::TyStr => {
244                 assert!(self.llextra != ptr::null_mut());
245                 self.llextra
246             }
247             _ => bug!("unexpected type `{}` in ConstPlace::len", self.ty)
248         }
249     }
250 }
251
252 /// Machinery for translating a constant's MIR to LLVM values.
253 /// FIXME(eddyb) use miri and lower its allocations to LLVM.
254 struct MirConstContext<'a, 'tcx: 'a> {
255     cx: &'a CodegenCx<'a, 'tcx>,
256     mir: &'a mir::Mir<'tcx>,
257
258     /// Type parameters for const fn and associated constants.
259     substs: &'tcx Substs<'tcx>,
260
261     /// Values of locals in a constant or const fn.
262     locals: IndexVec<mir::Local, Option<Result<Const<'tcx>, ConstEvalErr<'tcx>>>>
263 }
264
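// Fold an evaluation result into an overall `failure` flag, keeping only the
// first error encountered; later errors are ignored here so that evaluation
// can continue and surface as many problems as possible.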
265 fn add_err<'tcx, U, V>(failure: &mut Result<U, ConstEvalErr<'tcx>>,
266                        value: &Result<V, ConstEvalErr<'tcx>>)
267 {
268     if let &Err(ref err) = value {
269         if failure.is_ok() {
270             *failure = Err(err.clone());
271         }
272     }
273 }
274
275 impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
276     fn new(cx: &'a CodegenCx<'a, 'tcx>,
277            mir: &'a mir::Mir<'tcx>,
278            substs: &'tcx Substs<'tcx>,
279            args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
280            -> MirConstContext<'a, 'tcx> {
281         let mut context = MirConstContext {
282             cx,
283             mir,
284             substs,
285             locals: (0..mir.local_decls.len()).map(|_| None).collect(),
286         };
287         for (i, arg) in args.into_iter().enumerate() {
288             // Locals after local 0 are the function arguments
289             let index = mir::Local::new(i + 1);
290             context.locals[index] = Some(arg);
291         }
292         context
293     }
294
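    // Resolve `def_id` + `substs` to a concrete instance, fetch its MIR and
    // evaluate it, passing `args` as the values of the argument locals
    // (locals 1 onwards).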
295     fn trans_def(cx: &'a CodegenCx<'a, 'tcx>,
296                  def_id: DefId,
297                  substs: &'tcx Substs<'tcx>,
298                  args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
299                  -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
300         let instance = ty::Instance::resolve(cx.tcx,
301                                              ty::ParamEnv::empty(traits::Reveal::All),
302                                              def_id,
303                                              substs).unwrap();
304         let mir = cx.tcx.instance_mir(instance.def);
305         MirConstContext::new(cx, &mir, instance.substs, args).trans()
306     }
307
308     fn monomorphize<T>(&self, value: &T) -> T
309         where T: TransNormalize<'tcx>
310     {
311         self.cx.tcx.trans_apply_param_substs(self.substs, value)
312     }
313
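    // Evaluate the MIR by walking its basic blocks from START_BLOCK: each
    // `Assign` statement stores a `Const` (or an error) into `locals`, and the
    // terminator decides the next block, until `Return` yields the value of
    // RETURN_PLACE.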
314     fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
315         let tcx = self.cx.tcx;
316         let mut bb = mir::START_BLOCK;
317
        // Make sure to evaluate all statements to
        // report as many errors as we possibly can.
320         let mut failure = Ok(());
321
322         loop {
323             let data = &self.mir[bb];
324             for statement in &data.statements {
325                 let span = statement.source_info.span;
326                 match statement.kind {
327                     mir::StatementKind::Assign(ref dest, ref rvalue) => {
328                         let ty = dest.ty(self.mir, tcx);
329                         let ty = self.monomorphize(&ty).to_ty(tcx);
330                         let value = self.const_rvalue(rvalue, ty, span);
331                         add_err(&mut failure, &value);
332                         self.store(dest, value, span);
333                     }
334                     mir::StatementKind::StorageLive(_) |
335                     mir::StatementKind::StorageDead(_) |
336                     mir::StatementKind::Validate(..) |
337                     mir::StatementKind::EndRegion(_) |
338                     mir::StatementKind::Nop => {}
339                     mir::StatementKind::InlineAsm { .. } |
340                     mir::StatementKind::SetDiscriminant{ .. } => {
341                         span_bug!(span, "{:?} should not appear in constants?", statement.kind);
342                     }
343                 }
344             }
345
346             let terminator = data.terminator();
347             let span = terminator.source_info.span;
348             bb = match terminator.kind {
349                 mir::TerminatorKind::Drop { target, .. } | // No dropping.
350                 mir::TerminatorKind::Goto { target } => target,
351                 mir::TerminatorKind::Return => {
352                     failure?;
353                     return self.locals[mir::RETURN_PLACE].clone().unwrap_or_else(|| {
354                         span_bug!(span, "no returned value in constant");
355                     });
356                 }
357
358                 mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
359                     let cond = self.const_operand(cond, span)?;
360                     let cond_bool = common::const_to_uint(cond.llval) != 0;
361                     if cond_bool != expected {
362                         let err = match *msg {
363                             mir::AssertMessage::BoundsCheck { ref len, ref index } => {
364                                 let len = self.const_operand(len, span)?;
365                                 let index = self.const_operand(index, span)?;
366                                 ErrKind::IndexOutOfBounds {
367                                     len: common::const_to_uint(len.llval),
368                                     index: common::const_to_uint(index.llval)
369                                 }
370                             }
371                             mir::AssertMessage::Math(ref err) => {
372                                 ErrKind::Math(err.clone())
373                             }
374                             mir::AssertMessage::GeneratorResumedAfterReturn |
375                             mir::AssertMessage::GeneratorResumedAfterPanic =>
376                                 span_bug!(span, "{:?} should not appear in constants?", msg),
377                         };
378
                        let err = ConstEvalErr { span, kind: err };
380                         err.report(tcx, span, "expression");
381                         failure = Err(err);
382                     }
383                     target
384                 }
385
386                 mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
387                     let fn_ty = func.ty(self.mir, tcx);
388                     let fn_ty = self.monomorphize(&fn_ty);
389                     let (def_id, substs) = match fn_ty.sty {
390                         ty::TyFnDef(def_id, substs) => (def_id, substs),
391                         _ => span_bug!(span, "calling {:?} (of type {}) in constant",
392                                        func, fn_ty)
393                     };
394
395                     let mut arg_vals = IndexVec::with_capacity(args.len());
396                     for arg in args {
397                         let arg_val = self.const_operand(arg, span);
398                         add_err(&mut failure, &arg_val);
399                         arg_vals.push(arg_val);
400                     }
401                     if let Some((ref dest, target)) = *destination {
402                         let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic {
403                             match &tcx.item_name(def_id)[..] {
404                                 "size_of" => {
405                                     let llval = C_usize(self.cx,
406                                         self.cx.size_of(substs.type_at(0)).bytes());
407                                     Ok(Const::new(llval, tcx.types.usize))
408                                 }
409                                 "min_align_of" => {
410                                     let llval = C_usize(self.cx,
411                                         self.cx.align_of(substs.type_at(0)).abi());
412                                     Ok(Const::new(llval, tcx.types.usize))
413                                 }
414                                 "type_id" => {
415                                     let llval = C_u64(self.cx,
416                                         self.cx.tcx.type_id_hash(substs.type_at(0)));
417                                     Ok(Const::new(llval, tcx.types.u64))
418                                 }
419                                 _ => span_bug!(span, "{:?} in constant", terminator.kind)
420                             }
421                         } else if let Some((op, is_checked)) = self.is_binop_lang_item(def_id) {
422                             (||{
423                                 assert_eq!(arg_vals.len(), 2);
424                                 let rhs = arg_vals.pop().unwrap()?;
425                                 let lhs = arg_vals.pop().unwrap()?;
426                                 if !is_checked {
427                                     let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
428                                     let (lhs, rhs) = (lhs.llval, rhs.llval);
429                                     Ok(Const::new(const_scalar_binop(op, lhs, rhs, binop_ty),
430                                                   binop_ty))
431                                 } else {
432                                     let ty = lhs.ty;
433                                     let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
434                                     let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
435                                     let (lhs, rhs) = (lhs.llval, rhs.llval);
436                                     assert!(!ty.is_fp());
437
438                                     match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
439                                         Some((llval, of)) => {
440                                             Ok(trans_const_adt(
441                                                 self.cx,
442                                                 binop_ty,
443                                                 &mir::AggregateKind::Tuple,
444                                                 &[
445                                                     Const::new(llval, val_ty),
446                                                     Const::new(C_bool(self.cx, of), tcx.types.bool)
447                                                 ]))
448                                         }
449                                         None => {
450                                             span_bug!(span,
451                                                 "{:?} got non-integer operands: {:?} and {:?}",
452                                                 op, Value(lhs), Value(rhs));
453                                         }
454                                     }
455                                 }
456                             })()
457                         } else {
458                             MirConstContext::trans_def(self.cx, def_id, substs, arg_vals)
459                         };
460                         add_err(&mut failure, &result);
461                         self.store(dest, result, span);
462                         target
463                     } else {
464                         span_bug!(span, "diverging {:?} in constant", terminator.kind);
465                     }
466                 }
467                 _ => span_bug!(span, "{:?} in constant", terminator.kind)
468             };
469         }
470     }
471
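    // Map the i128/u128 arithmetic lang items back to the MIR binary operator
    // they implement; the boolean is true for the overflow-checked (`*o`)
    // variants.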
472     fn is_binop_lang_item(&mut self, def_id: DefId) -> Option<(mir::BinOp, bool)> {
473         let tcx = self.cx.tcx;
474         let items = tcx.lang_items();
475         let def_id = Some(def_id);
476         if items.i128_add_fn() == def_id { Some((mir::BinOp::Add, false)) }
477         else if items.u128_add_fn() == def_id { Some((mir::BinOp::Add, false)) }
478         else if items.i128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) }
479         else if items.u128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) }
480         else if items.i128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) }
481         else if items.u128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) }
482         else if items.i128_div_fn() == def_id { Some((mir::BinOp::Div, false)) }
483         else if items.u128_div_fn() == def_id { Some((mir::BinOp::Div, false)) }
484         else if items.i128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) }
485         else if items.u128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) }
486         else if items.i128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) }
487         else if items.u128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) }
488         else if items.i128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) }
489         else if items.u128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) }
490         else if items.i128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) }
491         else if items.u128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) }
492         else if items.i128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) }
493         else if items.u128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) }
494         else if items.i128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) }
495         else if items.u128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) }
496         else if items.i128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) }
497         else if items.u128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) }
498         else if items.i128_shro_fn() == def_id { Some((mir::BinOp::Shr, true)) }
499         else if items.u128_shro_fn() == def_id { Some((mir::BinOp::Shr, true)) }
500         else { None }
501     }
502
503     fn store(&mut self,
504              dest: &mir::Place<'tcx>,
505              value: Result<Const<'tcx>, ConstEvalErr<'tcx>>,
506              span: Span) {
507         if let mir::Place::Local(index) = *dest {
508             self.locals[index] = Some(value);
509         } else {
510             span_bug!(span, "assignment to {:?} in constant", dest);
511         }
512     }
513
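    // Evaluate a MIR place to a `ConstPlace`. Locals are looked up directly,
    // statics become `Base::Static` pointers, and projections (deref, field,
    // index) are applied to the recursively evaluated base.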
514     fn const_place(&self, place: &mir::Place<'tcx>, span: Span)
515                     -> Result<ConstPlace<'tcx>, ConstEvalErr<'tcx>> {
516         let tcx = self.cx.tcx;
517
518         if let mir::Place::Local(index) = *place {
519             return self.locals[index].clone().unwrap_or_else(|| {
520                 span_bug!(span, "{:?} not initialized", place)
521             }).map(|v| v.as_place());
522         }
523
524         let place = match *place {
525             mir::Place::Local(_)  => bug!(), // handled above
526             mir::Place::Static(box mir::Static { def_id, ty }) => {
527                 ConstPlace {
528                     base: Base::Static(consts::get_static(self.cx, def_id)),
529                     llextra: ptr::null_mut(),
530                     ty: self.monomorphize(&ty),
531                 }
532             }
533             mir::Place::Projection(ref projection) => {
534                 let tr_base = self.const_place(&projection.base, span)?;
535                 let projected_ty = PlaceTy::Ty { ty: tr_base.ty }
536                     .projection_ty(tcx, &projection.elem);
537                 let base = tr_base.to_const(span);
538                 let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
539                 let has_metadata = self.cx.type_has_metadata(projected_ty);
540
541                 let (projected, llextra) = match projection.elem {
542                     mir::ProjectionElem::Deref => {
543                         let (base, extra) = if !has_metadata {
544                             (base.llval, ptr::null_mut())
545                         } else {
546                             base.get_fat_ptr(self.cx)
547                         };
548                         if self.cx.statics.borrow().contains_key(&base) {
549                             (Base::Static(base), extra)
550                         } else if let ty::TyStr = projected_ty.sty {
551                             (Base::Str(base), extra)
552                         } else {
553                             let v = base;
554                             let v = self.cx.const_unsized.borrow().get(&v).map_or(v, |&v| v);
555                             let mut val = unsafe { llvm::LLVMGetInitializer(v) };
556                             if val.is_null() {
557                                 span_bug!(span, "dereference of non-constant pointer `{:?}`",
558                                           Value(base));
559                             }
560                             let layout = self.cx.layout_of(projected_ty);
561                             if let layout::Abi::Scalar(ref scalar) = layout.abi {
562                                 let i1_type = Type::i1(self.cx);
563                                 if scalar.is_bool() && val_ty(val) != i1_type {
564                                     unsafe {
565                                         val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
566                                     }
567                                 }
568                             }
569                             (Base::Value(val), extra)
570                         }
571                     }
572                     mir::ProjectionElem::Field(ref field, _) => {
573                         let llprojected = base.get_field(self.cx, field.index());
574                         let llextra = if !has_metadata {
575                             ptr::null_mut()
576                         } else {
577                             tr_base.llextra
578                         };
579                         (Base::Value(llprojected), llextra)
580                     }
581                     mir::ProjectionElem::Index(index) => {
582                         let index = &mir::Operand::Copy(mir::Place::Local(index));
583                         let llindex = self.const_operand(index, span)?.llval;
584
585                         let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) {
586                             iv
587                         } else {
588                             span_bug!(span, "index is not an integer-constant expression")
589                         };
590
                        // Produce an undef instead of an LLVM assertion on OOB.
592                         let len = common::const_to_uint(tr_base.len(self.cx));
593                         let llelem = if iv < len as u128 {
594                             const_get_elt(base.llval, iv as u64)
595                         } else {
596                             C_undef(self.cx.layout_of(projected_ty).llvm_type(self.cx))
597                         };
598
599                         (Base::Value(llelem), ptr::null_mut())
600                     }
601                     _ => span_bug!(span, "{:?} in constant", projection.elem)
602                 };
603                 ConstPlace {
604                     base: projected,
605                     llextra,
606                     ty: projected_ty
607                 }
608             }
609         };
610         Ok(place)
611     }
612
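    // Evaluate a MIR operand to a `Const`: copies/moves read the place via
    // `const_place`, promoted constants evaluate their promoted MIR, and
    // unevaluated constants are resolved and evaluated through `trans_def`.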
613     fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
614                      -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
615         debug!("const_operand({:?} @ {:?})", operand, span);
616         let result = match *operand {
617             mir::Operand::Copy(ref place) |
618             mir::Operand::Move(ref place) => {
619                 Ok(self.const_place(place, span)?.to_const(span))
620             }
621
622             mir::Operand::Constant(ref constant) => {
623                 let ty = self.monomorphize(&constant.ty);
624                 match constant.literal.clone() {
625                     mir::Literal::Promoted { index } => {
626                         let mir = &self.mir.promoted[index];
627                         MirConstContext::new(self.cx, mir, self.substs, IndexVec::new()).trans()
628                     }
629                     mir::Literal::Value { value } => {
630                         if let ConstVal::Unevaluated(def_id, substs) = value.val {
631                             let substs = self.monomorphize(&substs);
632                             MirConstContext::trans_def(self.cx, def_id, substs, IndexVec::new())
633                         } else {
634                             Ok(Const::from_constval(self.cx, &value.val, ty))
635                         }
636                     }
637                 }
638             }
639         };
640         debug!("const_operand({:?} @ {:?}) = {:?}", operand, span,
641                result.as_ref().ok());
642         result
643     }
644
645     fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef])
646                    -> Const<'tcx>
647     {
648         let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
649             bug!("bad array type {:?}", array_ty)
650         });
651         let llunitty = self.cx.layout_of(elem_ty).llvm_type(self.cx);
652         // If the array contains enums, an LLVM array won't work.
653         let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
654             C_array(llunitty, fields)
655         } else {
656             C_struct(self.cx, fields, false)
657         };
658         Const::new(val, array_ty)
659     }
660
661     fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
662                     dest_ty: Ty<'tcx>, span: Span)
663                     -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
664         let tcx = self.cx.tcx;
665         debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span);
666         let val = match *rvalue {
667             mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
668
669             mir::Rvalue::Repeat(ref elem, count) => {
670                 let elem = self.const_operand(elem, span)?;
671                 let size = count.as_u64();
672                 assert_eq!(size as usize as u64, size);
673                 let fields = vec![elem.llval; size as usize];
674                 self.const_array(dest_ty, &fields)
675             }
676
677             mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => {
678                 // Make sure to evaluate all operands to
679                 // report as many errors as we possibly can.
680                 let mut fields = Vec::with_capacity(operands.len());
681                 let mut failure = Ok(());
682                 for operand in operands {
683                     match self.const_operand(operand, span) {
684                         Ok(val) => fields.push(val.llval),
685                         Err(err) => if failure.is_ok() { failure = Err(err); }
686                     }
687                 }
688                 failure?;
689
690                 self.const_array(dest_ty, &fields)
691             }
692
693             mir::Rvalue::Aggregate(ref kind, ref operands) => {
694                 // Make sure to evaluate all operands to
695                 // report as many errors as we possibly can.
696                 let mut fields = Vec::with_capacity(operands.len());
697                 let mut failure = Ok(());
698                 for operand in operands {
699                     match self.const_operand(operand, span) {
700                         Ok(val) => fields.push(val),
701                         Err(err) => if failure.is_ok() { failure = Err(err); }
702                     }
703                 }
704                 failure?;
705
706                 trans_const_adt(self.cx, dest_ty, kind, &fields)
707             }
708
709             mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
710                 let operand = self.const_operand(source, span)?;
711                 let cast_ty = self.monomorphize(&cast_ty);
712
713                 let val = match *kind {
714                     mir::CastKind::ReifyFnPointer => {
715                         match operand.ty.sty {
716                             ty::TyFnDef(def_id, substs) => {
717                                 if tcx.has_attr(def_id, "rustc_args_required_const") {
718                                     bug!("reifying a fn ptr that requires \
719                                           const arguments");
720                                 }
721                                 callee::resolve_and_get_fn(self.cx, def_id, substs)
722                             }
723                             _ => {
724                                 span_bug!(span, "{} cannot be reified to a fn ptr",
725                                           operand.ty)
726                             }
727                         }
728                     }
729                     mir::CastKind::ClosureFnPointer => {
730                         match operand.ty.sty {
731                             ty::TyClosure(def_id, substs) => {
732                                 // Get the def_id for FnOnce::call_once
733                                 let fn_once = tcx.lang_items().fn_once_trait().unwrap();
734                                 let call_once = tcx
735                                     .global_tcx().associated_items(fn_once)
736                                     .find(|it| it.kind == ty::AssociatedKind::Method)
737                                     .unwrap().def_id;
738                                 // Now create its substs [Closure, Tuple]
739                                 let input = substs.closure_sig(def_id, tcx).input(0);
740                                 let input = tcx.erase_late_bound_regions_and_normalize(&input);
741                                 let substs = tcx.mk_substs([operand.ty, input]
742                                     .iter().cloned().map(Kind::from));
743                                 callee::resolve_and_get_fn(self.cx, call_once, substs)
744                             }
745                             _ => {
746                                 bug!("{} cannot be cast to a fn ptr", operand.ty)
747                             }
748                         }
749                     }
750                     mir::CastKind::UnsafeFnPointer => {
751                         // this is a no-op at the LLVM level
752                         operand.llval
753                     }
754                     mir::CastKind::Unsize => {
755                         let pointee_ty = operand.ty.builtin_deref(true)
756                             .expect("consts: unsizing got non-pointer type").ty;
757                         let (base, old_info) = if !self.cx.type_is_sized(pointee_ty) {
758                             // Normally, the source is a thin pointer and we are
759                             // adding extra info to make a fat pointer. The exception
760                             // is when we are upcasting an existing object fat pointer
761                             // to use a different vtable. In that case, we want to
762                             // load out the original data pointer so we can repackage
763                             // it.
764                             let (base, extra) = operand.get_fat_ptr(self.cx);
765                             (base, Some(extra))
766                         } else {
767                             (operand.llval, None)
768                         };
769
770                         let unsized_ty = cast_ty.builtin_deref(true)
771                             .expect("consts: unsizing got non-pointer target type").ty;
772                         let ptr_ty = self.cx.layout_of(unsized_ty).llvm_type(self.cx).ptr_to();
773                         let base = consts::ptrcast(base, ptr_ty);
774                         let info = base::unsized_info(self.cx, pointee_ty,
775                                                       unsized_ty, old_info);
776
777                         if old_info.is_none() {
778                             let prev_const = self.cx.const_unsized.borrow_mut()
779                                                      .insert(base, operand.llval);
780                             assert!(prev_const.is_none() || prev_const == Some(operand.llval));
781                         }
782                         C_fat_ptr(self.cx, base, info)
783                     }
784                     mir::CastKind::Misc if self.cx.layout_of(operand.ty).is_llvm_immediate() => {
785                         let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
786                         let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
787                         let cast_layout = self.cx.layout_of(cast_ty);
788                         assert!(cast_layout.is_llvm_immediate());
789                         let ll_t_out = cast_layout.immediate_llvm_type(self.cx);
790                         let llval = operand.llval;
791
792                         let mut signed = false;
793                         let l = self.cx.layout_of(operand.ty);
794                         if let layout::Abi::Scalar(ref scalar) = l.abi {
795                             if let layout::Int(_, true) = scalar.value {
796                                 signed = true;
797                             }
798                         }
799
800                         unsafe {
801                             match (r_t_in, r_t_out) {
802                                 (CastTy::Int(_), CastTy::Int(_)) => {
803                                     let s = signed as llvm::Bool;
804                                     llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
805                                 }
806                                 (CastTy::Int(_), CastTy::Float) => {
807                                     cast_const_int_to_float(self.cx, llval, signed, ll_t_out)
808                                 }
809                                 (CastTy::Float, CastTy::Float) => {
810                                     llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
811                                 }
812                                 (CastTy::Float, CastTy::Int(IntTy::I)) => {
813                                     cast_const_float_to_int(self.cx, &operand,
814                                                             true, ll_t_out, span)
815                                 }
816                                 (CastTy::Float, CastTy::Int(_)) => {
817                                     cast_const_float_to_int(self.cx, &operand,
818                                                             false, ll_t_out, span)
819                                 }
820                                 (CastTy::Ptr(_), CastTy::Ptr(_)) |
821                                 (CastTy::FnPtr, CastTy::Ptr(_)) |
822                                 (CastTy::RPtr(_), CastTy::Ptr(_)) => {
823                                     consts::ptrcast(llval, ll_t_out)
824                                 }
825                                 (CastTy::Int(_), CastTy::Ptr(_)) => {
826                                     let s = signed as llvm::Bool;
827                                     let usize_llval = llvm::LLVMConstIntCast(llval,
828                                         self.cx.isize_ty.to_ref(), s);
829                                     llvm::LLVMConstIntToPtr(usize_llval, ll_t_out.to_ref())
830                                 }
831                                 (CastTy::Ptr(_), CastTy::Int(_)) |
832                                 (CastTy::FnPtr, CastTy::Int(_)) => {
833                                     llvm::LLVMConstPtrToInt(llval, ll_t_out.to_ref())
834                                 }
835                                 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
836                             }
837                         }
838                     }
839                     mir::CastKind::Misc => { // Casts from a fat-ptr.
840                         let l = self.cx.layout_of(operand.ty);
841                         let cast = self.cx.layout_of(cast_ty);
842                         if l.is_llvm_scalar_pair() {
843                             let (data_ptr, meta) = operand.get_fat_ptr(self.cx);
844                             if cast.is_llvm_scalar_pair() {
845                                 let data_cast = consts::ptrcast(data_ptr,
846                                     cast.scalar_pair_element_llvm_type(self.cx, 0));
847                                 C_fat_ptr(self.cx, data_cast, meta)
                            } else { // cast to thin-ptr
                                // Casting a fat-ptr to a thin-ptr extracts the data pointer and
                                // pointer-casts it to the desired pointer type.
851                                 let llcast_ty = cast.immediate_llvm_type(self.cx);
852                                 consts::ptrcast(data_ptr, llcast_ty)
853                             }
854                         } else {
855                             bug!("Unexpected non-fat-pointer operand")
856                         }
857                     }
858                 };
859                 Const::new(val, cast_ty)
860             }
861
862             mir::Rvalue::Ref(_, bk, ref place) => {
863                 let tr_place = self.const_place(place, span)?;
864
865                 let ty = tr_place.ty;
866                 let ref_ty = tcx.mk_ref(tcx.types.re_erased,
867                     ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
868
869                 let base = match tr_place.base {
870                     Base::Value(llval) => {
871                         // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
872                         let align = if self.cx.type_is_sized(ty) {
873                             self.cx.align_of(ty)
874                         } else {
875                             self.cx.tcx.data_layout.pointer_align
876                         };
877                         if let mir::BorrowKind::Mut { .. } = bk {
878                             consts::addr_of_mut(self.cx, llval, align, "ref_mut")
879                         } else {
880                             consts::addr_of(self.cx, llval, align, "ref")
881                         }
882                     }
883                     Base::Str(llval) |
884                     Base::Static(llval) => llval
885                 };
886
887                 let ptr = if self.cx.type_is_sized(ty) {
888                     base
889                 } else {
890                     C_fat_ptr(self.cx, base, tr_place.llextra)
891                 };
892                 Const::new(ptr, ref_ty)
893             }
894
895             mir::Rvalue::Len(ref place) => {
896                 let tr_place = self.const_place(place, span)?;
897                 Const::new(tr_place.len(self.cx), tcx.types.usize)
898             }
899
900             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
901                 let lhs = self.const_operand(lhs, span)?;
902                 let rhs = self.const_operand(rhs, span)?;
903                 let ty = lhs.ty;
904                 let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
905                 let (lhs, rhs) = (lhs.llval, rhs.llval);
906                 Const::new(const_scalar_binop(op, lhs, rhs, ty), binop_ty)
907             }
908
909             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
910                 let lhs = self.const_operand(lhs, span)?;
911                 let rhs = self.const_operand(rhs, span)?;
912                 let ty = lhs.ty;
913                 let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
914                 let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
915                 let (lhs, rhs) = (lhs.llval, rhs.llval);
916                 assert!(!ty.is_fp());
917
918                 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
919                     Some((llval, of)) => {
920                         trans_const_adt(self.cx, binop_ty, &mir::AggregateKind::Tuple, &[
921                             Const::new(llval, val_ty),
922                             Const::new(C_bool(self.cx, of), tcx.types.bool)
923                         ])
924                     }
925                     None => {
926                         span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}",
927                                   rvalue, Value(lhs), Value(rhs));
928                     }
929                 }
930             }
931
932             mir::Rvalue::UnaryOp(op, ref operand) => {
933                 let operand = self.const_operand(operand, span)?;
934                 let lloperand = operand.llval;
935                 let llval = match op {
936                     mir::UnOp::Not => {
937                         unsafe {
938                             llvm::LLVMConstNot(lloperand)
939                         }
940                     }
941                     mir::UnOp::Neg => {
942                         let is_float = operand.ty.is_fp();
943                         unsafe {
944                             if is_float {
945                                 llvm::LLVMConstFNeg(lloperand)
946                             } else {
947                                 llvm::LLVMConstNeg(lloperand)
948                             }
949                         }
950                     }
951                 };
952                 Const::new(llval, operand.ty)
953             }
954
955             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
956                 assert!(self.cx.type_is_sized(ty));
957                 let llval = C_usize(self.cx, self.cx.size_of(ty).bytes());
958                 Const::new(llval, tcx.types.usize)
959             }
960
961             _ => span_bug!(span, "{:?} in constant", rvalue)
962         };
963
964         debug!("const_rvalue({:?}: {:?} @ {:?}) = {:?}", rvalue, dest_ty, span, val);
965
966         Ok(val)
967     }
968
969 }
970
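// Try to turn an LLVM integer constant back into a `ConstInt` of the Rust type
// `t`; returns `None` for non-integer types or non-constant values.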
971 fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
972     match t.sty {
973         ty::TyInt(int_type) => const_to_opt_u128(value, true)
974             .and_then(|input| ConstInt::new_signed(input as i128, int_type,
975                                                    tcx.sess.target.isize_ty)),
976         ty::TyUint(uint_type) => const_to_opt_u128(value, false)
977             .and_then(|input| ConstInt::new_unsigned(input, uint_type,
978                                                      tcx.sess.target.usize_ty)),
979         _ => None
980
981     }
982 }
983
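// Constant-fold a scalar binary operation entirely at the LLVM level, picking
// the float/signed/unsigned flavour of each LLVM constant folder based on
// `input_ty`; shifts first adjust the RHS via `cast_shift_const_rhs`.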
984 pub fn const_scalar_binop(op: mir::BinOp,
985                           lhs: ValueRef,
986                           rhs: ValueRef,
987                           input_ty: Ty) -> ValueRef {
988     assert!(!input_ty.is_simd());
989     let is_float = input_ty.is_fp();
990     let signed = input_ty.is_signed();
991
992     unsafe {
993         match op {
994             mir::BinOp::Add if is_float => llvm::LLVMConstFAdd(lhs, rhs),
995             mir::BinOp::Add             => llvm::LLVMConstAdd(lhs, rhs),
996
997             mir::BinOp::Sub if is_float => llvm::LLVMConstFSub(lhs, rhs),
998             mir::BinOp::Sub             => llvm::LLVMConstSub(lhs, rhs),
999
1000             mir::BinOp::Mul if is_float => llvm::LLVMConstFMul(lhs, rhs),
1001             mir::BinOp::Mul             => llvm::LLVMConstMul(lhs, rhs),
1002
1003             mir::BinOp::Div if is_float => llvm::LLVMConstFDiv(lhs, rhs),
1004             mir::BinOp::Div if signed   => llvm::LLVMConstSDiv(lhs, rhs),
1005             mir::BinOp::Div             => llvm::LLVMConstUDiv(lhs, rhs),
1006
1007             mir::BinOp::Rem if is_float => llvm::LLVMConstFRem(lhs, rhs),
1008             mir::BinOp::Rem if signed   => llvm::LLVMConstSRem(lhs, rhs),
1009             mir::BinOp::Rem             => llvm::LLVMConstURem(lhs, rhs),
1010
1011             mir::BinOp::BitXor => llvm::LLVMConstXor(lhs, rhs),
1012             mir::BinOp::BitAnd => llvm::LLVMConstAnd(lhs, rhs),
1013             mir::BinOp::BitOr  => llvm::LLVMConstOr(lhs, rhs),
1014             mir::BinOp::Shl    => {
1015                 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
1016                 llvm::LLVMConstShl(lhs, rhs)
1017             }
1018             mir::BinOp::Shr    => {
1019                 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
1020                 if signed { llvm::LLVMConstAShr(lhs, rhs) }
1021                 else      { llvm::LLVMConstLShr(lhs, rhs) }
1022             }
1023             mir::BinOp::Eq | mir::BinOp::Ne |
1024             mir::BinOp::Lt | mir::BinOp::Le |
1025             mir::BinOp::Gt | mir::BinOp::Ge => {
1026                 if is_float {
1027                     let cmp = base::bin_op_to_fcmp_predicate(op.to_hir_binop());
1028                     llvm::LLVMConstFCmp(cmp, lhs, rhs)
1029                 } else {
1030                     let cmp = base::bin_op_to_icmp_predicate(op.to_hir_binop(),
1031                                                                 signed);
1032                     llvm::LLVMConstICmp(cmp, lhs, rhs)
1033                 }
1034             }
1035             mir::BinOp::Offset => unreachable!("BinOp::Offset in const-eval!")
1036         }
1037     }
1038 }
1039
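// Overflow-checked companion to `const_scalar_binop` for integer operands: the
// operation is redone on `ConstInt`s purely to detect overflow, and the
// returned pair is the (wrapped) LLVM result plus the overflow flag. Returns
// `None` if either operand is not an integer constant.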
1040 pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1041                                             op: mir::BinOp,
1042                                             lllhs: ValueRef,
1043                                             llrhs: ValueRef,
1044                                             input_ty: Ty<'tcx>)
1045                                             -> Option<(ValueRef, bool)> {
1046     if let (Some(lhs), Some(rhs)) = (to_const_int(lllhs, input_ty, tcx),
1047                                      to_const_int(llrhs, input_ty, tcx)) {
1048         let result = match op {
1049             mir::BinOp::Add => lhs + rhs,
1050             mir::BinOp::Sub => lhs - rhs,
1051             mir::BinOp::Mul => lhs * rhs,
1052             mir::BinOp::Shl => lhs << rhs,
1053             mir::BinOp::Shr => lhs >> rhs,
1054             _ => {
1055                 bug!("Operator `{:?}` is not a checkable operator", op)
1056             }
1057         };
1058
1059         let of = match result {
1060             Ok(_) => false,
1061             Err(ConstMathErr::Overflow(_)) |
1062             Err(ConstMathErr::ShiftNegative) => true,
1063             Err(err) => {
1064                 bug!("Operator `{:?}` on `{:?}` and `{:?}` errored: {}",
1065                      op, lhs, rhs, err.description());
1066             }
1067         };
1068
1069         Some((const_scalar_binop(op, lllhs, llrhs, input_ty), of))
1070     } else {
1071         None
1072     }
1073 }
1074
1075 unsafe fn cast_const_float_to_int(cx: &CodegenCx,
1076                                   operand: &Const,
1077                                   signed: bool,
1078                                   int_ty: Type,
1079                                   span: Span) -> ValueRef {
1080     let llval = operand.llval;
1081     let float_bits = match operand.ty.sty {
1082         ty::TyFloat(fty) => fty.bit_width(),
1083         _ => bug!("cast_const_float_to_int: operand not a float"),
1084     };
1085     // Note: this breaks if llval is a complex constant expression rather than a simple constant.
1086     // One way that might happen would be if addresses could be turned into integers in constant
1087     // expressions, but that doesn't appear to be possible?
1088     // In any case, an ICE is better than producing undef.
1089     let llval_bits = consts::bitcast(llval, Type::ix(cx, float_bits as u64));
1090     let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
1091         panic!("could not get bits of constant float {:?}",
1092                Value(llval));
1093     });
1094     let int_width = int_ty.int_width() as usize;
1095     // Try to convert, but report an error for overflow and NaN. This matches HIR const eval.
1096     let cast_result = match float_bits {
1097         32 if signed => ieee::Single::from_bits(bits).to_i128(int_width).map(|v| v as u128),
1098         64 if signed => ieee::Double::from_bits(bits).to_i128(int_width).map(|v| v as u128),
1099         32 => ieee::Single::from_bits(bits).to_u128(int_width),
1100         64 => ieee::Double::from_bits(bits).to_u128(int_width),
1101         n => bug!("unsupported float width {}", n),
1102     };
1103     if cast_result.status.contains(Status::INVALID_OP) {
        let err = ConstEvalErr { span, kind: ErrKind::CannotCast };
1105         err.report(cx.tcx, span, "expression");
1106     }
1107     C_uint_big(int_ty, cast_result.value)
1108 }
1109
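// Cast an integer constant to a float constant. Unsigned values at or above
// `MAX_F32_PLUS_HALF_ULP` that are cast to f32 are special-cased to round up
// to infinity (e.g. `u128::MAX as f32` yields `f32::INFINITY` here); everything
// else is left to LLVM's constant folders.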
1110 unsafe fn cast_const_int_to_float(cx: &CodegenCx,
1111                                   llval: ValueRef,
1112                                   signed: bool,
1113                                   float_ty: Type) -> ValueRef {
1114     // Note: this breaks if llval is a complex constant expression rather than a simple constant.
1115     // One way that might happen would be if addresses could be turned into integers in constant
1116     // expressions, but that doesn't appear to be possible?
1117     // In any case, an ICE is better than producing undef.
1118     let value = const_to_opt_u128(llval, signed).unwrap_or_else(|| {
        panic!("could not get u128 value of constant integer {:?}",
1120                Value(llval));
1121     });
1122     if signed {
1123         llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
1124     } else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
1125         // We're casting to f32 and the value is > f32::MAX + 0.5 ULP -> round up to infinity.
1126         let infinity_bits = C_u32(cx, ieee::Single::INFINITY.to_bits() as u32);
1127         consts::bitcast(infinity_bits, float_ty)
1128     } else {
1129         llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
1130     }
1131 }
1132
1133 impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
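    // Evaluate a MIR constant while translating ordinary (non-constant) code.
    // If evaluation fails, a compile error has already been emitted, so it is
    // fine to fall back to an `undef` of the right type and keep going.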
1134     pub fn trans_constant(&mut self,
1135                           bx: &Builder<'a, 'tcx>,
1136                           constant: &mir::Constant<'tcx>)
1137                           -> Const<'tcx>
1138     {
1139         debug!("trans_constant({:?})", constant);
1140         let ty = self.monomorphize(&constant.ty);
1141         let result = match constant.literal.clone() {
1142             mir::Literal::Promoted { index } => {
1143                 let mir = &self.mir.promoted[index];
1144                 MirConstContext::new(bx.cx, mir, self.param_substs, IndexVec::new()).trans()
1145             }
1146             mir::Literal::Value { value } => {
1147                 if let ConstVal::Unevaluated(def_id, substs) = value.val {
1148                     let substs = self.monomorphize(&substs);
1149                     MirConstContext::trans_def(bx.cx, def_id, substs, IndexVec::new())
1150                 } else {
1151                     Ok(Const::from_constval(bx.cx, &value.val, ty))
1152                 }
1153             }
1154         };
1155
1156         let result = result.unwrap_or_else(|_| {
1157             // We've errored, so we don't have to produce working code.
1158             let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
1159             Const::new(C_undef(llty), ty)
1160         });
1161
1162         debug!("trans_constant({:?}) = {:?}", constant, result);
1163         result
1164     }
1165 }
1166
1167
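/// Translates the initializer of the static `def_id` into an LLVM constant
/// suitable for initializing the corresponding global.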
1168 pub fn trans_static_initializer<'a, 'tcx>(
1169     cx: &CodegenCx<'a, 'tcx>,
1170     def_id: DefId)
1171     -> Result<ValueRef, ConstEvalErr<'tcx>>
1172 {
1173     MirConstContext::trans_def(cx, def_id, Substs::empty(), IndexVec::new())
1174         .map(|c| c.llval)
1175 }
1176
1177 /// Construct a constant value, suitable for initializing a
1178 /// GlobalVariable, given a case and constant values for its fields.
1179 /// Note that this may have a different LLVM type (and different
1180 /// alignment!) from the representation's `type_of`, so it needs a
1181 /// pointer cast before use.
1182 ///
1183 /// The LLVM type system does not directly support unions, and only
1184 /// pointers can be bitcast, so a constant (and, by extension, the
1185 /// GlobalVariable initialized by it) will have a type that can vary
1186 /// depending on which case of an enum it is.
1187 ///
1188 /// To understand the alignment situation, consider `enum E { V64(u64),
1189 /// V32(u32, u32) }` on Windows, where `u64` is 8-byte aligned even on
1190 /// 32-bit targets.  The type has 8-byte alignment to accommodate the u64,
1191 /// but `V32(x, y)` would have LLVM type `{i32, i32, i32}`, which is 4-byte aligned.
1192 ///
1193 /// Currently the returned value has the same size as the type, but
1194 /// this could be changed in the future to avoid allocating unnecessary
1195 /// space after values of shorter-than-maximum cases.
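///
/// To put concrete numbers on the `enum E` example above (illustrative only,
/// taking the `{i32, i32, i32}` shape at face value): `E` itself would be
/// 16 bytes with 8-byte alignment, while the `V32` constant is 12 bytes with
/// 4-byte alignment, which is why the GlobalVariable keeps the constant's own
/// type and gets pointer-cast back before use.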
1196 fn trans_const_adt<'a, 'tcx>(
1197     cx: &CodegenCx<'a, 'tcx>,
1198     t: Ty<'tcx>,
1199     kind: &mir::AggregateKind,
1200     vals: &[Const<'tcx>]
1201 ) -> Const<'tcx> {
1202     let l = cx.layout_of(t);
1203     let variant_index = match *kind {
1204         mir::AggregateKind::Adt(_, index, _, _) => index,
1205         _ => 0,
1206     };
1207
1208     if let layout::Abi::Uninhabited = l.abi {
1209         return Const::new(C_undef(l.llvm_type(cx)), t);
1210     }
1211
1212     match l.variants {
1213         layout::Variants::Single { index } => {
1214             assert_eq!(variant_index, index);
1215             if let layout::FieldPlacement::Union(_) = l.fields {
1216                 assert_eq!(variant_index, 0);
1217                 assert_eq!(vals.len(), 1);
1218                 let (field_size, field_align) = cx.size_and_align_of(vals[0].ty);
1219                 let contents = [
1220                     vals[0].llval,
1221                     padding(cx, l.size - field_size)
1222                 ];
1223
1224                 let packed = l.align.abi() < field_align.abi();
1225                 Const::new(C_struct(cx, &contents, packed), t)
1226             } else {
1227                 if let layout::Abi::Vector { .. } = l.abi {
1228                     if let layout::FieldPlacement::Array { .. } = l.fields {
1229                         return Const::new(C_vector(&vals.iter().map(|x| x.llval)
1230                             .collect::<Vec<_>>()), t);
1231                     }
1232                 }
1233                 build_const_struct(cx, l, vals, None)
1234             }
1235         }
1236         layout::Variants::Tagged { .. } => {
1237             let discr = match *kind {
1238                 mir::AggregateKind::Adt(adt_def, _, _, _) => {
1239                     adt_def.discriminant_for_variant(cx.tcx, variant_index)
1240                            .to_u128_unchecked() as u64
1241                 },
1242                 _ => 0,
1243             };
1244             let discr_field = l.field(cx, 0);
1245             let discr = C_int(discr_field.llvm_type(cx), discr as i64);
1246             if let layout::Abi::Scalar(_) = l.abi {
1247                 Const::new(discr, t)
1248             } else {
1249                 let discr = Const::new(discr, discr_field.ty);
1250                 build_const_struct(cx, l.for_variant(cx, variant_index), vals, Some(discr))
1251             }
1252         }
1253         layout::Variants::NicheFilling {
1254             dataful_variant,
1255             ref niche_variants,
1256             niche_start,
1257             ..
1258         } => {
1259             if variant_index == dataful_variant {
1260                 build_const_struct(cx, l.for_variant(cx, dataful_variant), vals, None)
1261             } else {
1262                 let niche = l.field(cx, 0);
1263                 let niche_llty = niche.llvm_type(cx);
1264                 let niche_value = ((variant_index - niche_variants.start) as u128)
1265                     .wrapping_add(niche_start);
1266                 // FIXME(eddyb) Check the actual primitive type here.
1267                 let niche_llval = if niche_value == 0 {
1268                     // HACK(eddyb) Using `C_null` as it works on all types.
1269                     C_null(niche_llty)
1270                 } else {
1271                     C_uint_big(niche_llty, niche_value)
1272                 };
1273                 build_const_struct(cx, l, &[Const::new(niche_llval, niche.ty)], None)
1274             }
1275         }
1276     }
1277 }
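
// A worked illustration of the `NicheFilling` arm above (a hedged sketch; the
// concrete layout values are assumptions about `Option<&T>`, not read out of
// this crate): `Some` is the dataful variant, `niche_variants` covers only
// `None`, the niche field is the pointer itself and `niche_start` is 0, so
// `None` encodes as niche value 0 -- the familiar null-pointer optimization,
// emitted through `C_null` above.
#[test]
fn option_none_niche_value_is_zero() {
    // Assumed layout inputs for `Option<&T>` (hypothetical).
    let variant_index = 0usize;        // `None`
    let niche_variants_start = 0usize; // first (and only) niche variant
    let niche_start = 0u128;           // a pointer's single niche value
    let niche_value = ((variant_index - niche_variants_start) as u128)
        .wrapping_add(niche_start);
    assert_eq!(niche_value, 0);
}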
1278
1279 /// Building structs is a little complicated, because we might need to
1280 /// insert padding if a field's value is less aligned than its type.
1281 ///
1282 /// Continuing the example from `trans_const_adt`, a value of type `(u32,
1283 /// E)` should have the `E` at offset 8, but if that field's
1284 /// initializer is 4-byte aligned then simply translating the tuple as
1285 /// a two-element struct will locate it at offset 4, and accesses to it
1286 /// will read the wrong memory.
1287 fn build_const_struct<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
1288                                 layout: layout::TyLayout<'tcx>,
1289                                 vals: &[Const<'tcx>],
1290                                 discr: Option<Const<'tcx>>)
1291                                 -> Const<'tcx> {
1292     assert_eq!(vals.len(), layout.fields.count());
1293
1294     match layout.abi {
1295         layout::Abi::Scalar(_) |
1296         layout::Abi::ScalarPair(..) |
1297         layout::Abi::Vector { .. } if discr.is_none() => {
1298             let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
1299                 (f, layout.fields.offset(i))
1300             }).filter(|&(f, _)| !cx.layout_of(f.ty).is_zst());
1301             match (non_zst_fields.next(), non_zst_fields.next()) {
1302                 (Some((x, offset)), None) if offset.bytes() == 0 => {
1303                     return Const::new(x.llval, layout.ty);
1304                 }
1305                 (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
1306                     return Const::new(C_struct(cx, &[a.llval, b.llval], false), layout.ty);
1307                 }
1308                 (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
1309                     return Const::new(C_struct(cx, &[b.llval, a.llval], false), layout.ty);
1310                 }
1311                 _ => {}
1312             }
1313         }
1314         _ => {}
1315     }
1316
1317     // Build the constant field by field, tracking the current byte offset.
1318     let mut packed = false;
1319     let mut offset = Size::from_bytes(0);
1320     let mut cfields = Vec::new();
1321     cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2);
1322
1323     if let Some(discr) = discr {
1324         let (field_size, field_align) = cx.size_and_align_of(discr.ty);
1325         packed |= layout.align.abi() < field_align.abi();
1326         cfields.push(discr.llval);
1327         offset = field_size;
1328     }
1329
1330     let parts = layout.fields.index_by_increasing_offset().map(|i| {
1331         (vals[i], layout.fields.offset(i))
1332     });
1333     for (val, target_offset) in parts {
1334         let (field_size, field_align) = cx.size_and_align_of(val.ty);
1335         packed |= layout.align.abi() < field_align.abi();
1336         cfields.push(padding(cx, target_offset - offset));
1337         cfields.push(val.llval);
1338         offset = target_offset + field_size;
1339     }
1340
1341     // Pad to the size of the whole type, not e.g. the variant.
1342     cfields.push(padding(cx, cx.size_of(layout.ty) - offset));
1343
1344     Const::new(C_struct(cx, &cfields, packed), layout.ty)
1345 }
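
// A hedged sketch (hypothetical, not in the original file) of the padding
// arithmetic in `build_const_struct`, worked for the doc comment's `(u32, E)`
// example under the assumption that `E` is 16 bytes, 8-byte aligned, and
// placed at offset 8: the `u32` needs no padding before it, and 4 bytes of
// `undef` are inserted to reach offset 8.
#[test]
fn explicit_padding_for_u32_then_8_byte_aligned_field() {
    let offsets = [Size::from_bytes(0), Size::from_bytes(8)];
    let sizes = [Size::from_bytes(4), Size::from_bytes(16)];
    let mut offset = Size::from_bytes(0);
    let mut paddings = Vec::new();
    for (&target_offset, &field_size) in offsets.iter().zip(&sizes) {
        // Mirrors `padding(cx, target_offset - offset)` in the loop above.
        paddings.push(target_offset - offset);
        offset = target_offset + field_size;
    }
    assert_eq!(paddings, [Size::from_bytes(0), Size::from_bytes(4)]);
}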
1346
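/// Returns an `undef` byte array of the given size, used as explicit padding
/// between (and after) the fields of a constant struct.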
1347 fn padding(cx: &CodegenCx, size: Size) -> ValueRef {
1348     C_undef(Type::array(&Type::i8(cx), size.bytes()))
1349 }