1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind};
13 use rustc_const_math::ConstInt::*;
14 use rustc_const_math::{ConstInt, ConstMathErr, MAX_F32_PLUS_HALF_ULP};
15 use rustc::hir::def_id::DefId;
16 use rustc::infer::TransNormalize;
19 use rustc::mir::tcx::LvalueTy;
20 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
21 use rustc::ty::layout::{self, LayoutTyper};
22 use rustc::ty::cast::{CastTy, IntTy};
23 use rustc::ty::subst::{Kind, Substs, Subst};
24 use rustc_apfloat::{ieee, Float, Status};
25 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
26 use {adt, base, machine};
30 use common::{self, CrateContext, const_get_elt, val_ty};
31 use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64};
32 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, is_undef};
33 use common::const_to_opt_u128;
45 use super::lvalue::Alignment;
46 use super::operand::{OperandRef, OperandValue};
47 use super::MirContext;
49 /// A sized constant rvalue.
50 /// The LLVM type might not be the same for a single Rust type,
51 /// e.g. each enum variant would have its own LLVM struct type.
52 #[derive(Copy, Clone)]
53 pub struct Const<'tcx> {
// NOTE(review): the field list is not visible in this excerpt; later code
// accesses `self.llval: ValueRef` and `self.ty: Ty<'tcx>` — confirm against
// the full file.
58 impl<'tcx> Const<'tcx> {
// Plain constructor pairing an LLVM constant value with its Rust type.
59 pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> {
// Build a `Const` from a `ConstInt`: each integer kind is lowered to the
// matching LLVM integer constant (128-bit values go through C_big_integral).
66 pub fn from_constint<'a>(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt)
69 let (llval, ty) = match *ci {
70 I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8),
71 I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16),
72 I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32),
73 I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64),
74 I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128),
75 Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize),
76 U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8),
77 U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16),
78 U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32),
79 U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64),
80 U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128),
81 Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize),
83 Const { llval: llval, ty: ty }
86 /// Translate ConstVal into a LLVM constant value.
87 pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>,
91 let llty = type_of::type_of(ccx, ty);
// Floats are materialized from their raw bit pattern and bitcast to the
// float type, so no host-float rounding is involved.
93 ConstVal::Float(v) => {
94 let bits = match v.ty {
95 ast::FloatTy::F32 => C_u32(ccx, v.bits as u32),
96 ast::FloatTy::F64 => C_u64(ccx, v.bits as u64)
98 consts::bitcast(bits, llty)
100 ConstVal::Bool(v) => C_bool(ccx, v),
101 ConstVal::Integral(ref i) => return Const::from_constint(ccx, i),
102 ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
103 ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"),
104 ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64),
105 ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)),
// Aggregate-like ConstVals must have been expanded to MIR rvalues by now.
106 ConstVal::Variant(_) |
107 ConstVal::Aggregate(..) |
108 ConstVal::Unevaluated(..) => {
109 bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv)
113 assert!(!ty.has_erasable_regions());
// Extract the two halves of an immediate-pair constant (struct elements 0/1).
118 fn get_pair(&self) -> (ValueRef, ValueRef) {
119 (const_get_elt(self.llval, &[0]),
120 const_get_elt(self.llval, &[1]))
// Fat-pointer accessor; the asserts pin the data/extra field order to the
// ABI constants before reusing get_pair (body elided in this excerpt).
123 fn get_fat_ptr(&self) -> (ValueRef, ValueRef) {
124 assert_eq!(abi::FAT_PTR_ADDR, 0);
125 assert_eq!(abi::FAT_PTR_EXTRA, 1);
// View this constant as an lvalue with no extra (unsized) metadata.
129 fn as_lvalue(&self) -> ConstLvalue<'tcx> {
131 base: Base::Value(self.llval),
132 llextra: ptr::null_mut(),
// Convert to an OperandRef: immediate pair, plain immediate, or — when the
// LLVM types disagree or the value is not immediate — spill to a global and
// hand out a (cast) pointer to it.
137 pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
138 let llty = type_of::immediate_type_of(ccx, self.ty);
139 let llvalty = val_ty(self.llval);
141 let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
142 let (a, b) = self.get_pair();
143 OperandValue::Pair(a, b)
144 } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) {
145 // If the types match, we can use the value directly.
146 OperandValue::Immediate(self.llval)
148 // Otherwise, or if the value is not immediate, we create
149 // a constant LLVM global and cast its address if necessary.
150 let align = ccx.align_of(self.ty);
151 let ptr = consts::addr_of(ccx, self.llval, align, "const");
152 OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
// Debug renders both the LLVM value (via the `Value` wrapper) and the Rust type.
162 impl<'tcx> fmt::Debug for Const<'tcx> {
163 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
164 write!(f, "Const({:?}: {:?})", Value(self.llval), self.ty)
168 #[derive(Copy, Clone)]
// NOTE(review): the `enum Base { Value(..), Str(..), Static(..) }` declaration
// itself is not visible here — only its variant doc comments survive. Later
// code matches on Base::Value / Base::Str / Base::Static.
170 /// A constant value without an unique address.
173 /// String literal base pointer (cast from array).
176 /// The address of a static.
180 /// An lvalue as seen from a constant.
181 #[derive(Copy, Clone)]
182 struct ConstLvalue<'tcx> {
// NOTE(review): fields elided in this excerpt; usage implies `base: Base`,
// `llextra: ValueRef` and `ty: Ty<'tcx>` — confirm against the full file.
188 impl<'tcx> ConstLvalue<'tcx> {
// Only a by-value base can be turned back into a `Const`; loading through a
// `str` or `static` base inside a constant is a compiler bug.
189 fn to_const(&self, span: Span) -> Const<'tcx> {
191 Base::Value(val) => Const::new(val, self.ty),
193 span_bug!(span, "loading from `str` ({:?}) in constant",
196 Base::Static(val) => {
197 span_bug!(span, "loading from `static` ({:?}) in constant",
// Length of the lvalue: a compile-time usize for arrays; for slices/str the
// length lives in `llextra` (which must therefore be non-null here).
203 pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
205 ty::TyArray(_, n) => {
206 C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap())
208 ty::TySlice(_) | ty::TyStr => {
209 assert!(self.llextra != ptr::null_mut());
212 _ => bug!("unexpected type `{}` in ConstLvalue::len", self.ty)
217 /// Machinery for translating a constant's MIR to LLVM values.
218 /// FIXME(eddyb) use miri and lower its allocations to LLVM.
219 struct MirConstContext<'a, 'tcx: 'a> {
220 ccx: &'a CrateContext<'a, 'tcx>,
221 mir: &'a mir::Mir<'tcx>,
223 /// Type parameters for const fn and associated constants.
224 substs: &'tcx Substs<'tcx>,
226 /// Values of locals in a constant or const fn.
// None = not yet evaluated; Some(Err) = evaluation failed for that local.
227 locals: IndexVec<mir::Local, Option<Result<Const<'tcx>, ConstEvalErr<'tcx>>>>
// Record the first (or latest visible — the overwrite policy line is elided
// here) error from `value` into the accumulated `failure` slot, so evaluation
// can continue and report as many errors as possible.
230 fn add_err<'tcx, U, V>(failure: &mut Result<U, ConstEvalErr<'tcx>>,
231 value: &Result<V, ConstEvalErr<'tcx>>)
233 if let &Err(ref err) = value {
235 *failure = Err(err.clone());
240 impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
// Build a context with all locals unevaluated, then seed the argument
// locals (locals 1..) from `args`; local 0 is the return pointer.
241 fn new(ccx: &'a CrateContext<'a, 'tcx>,
242 mir: &'a mir::Mir<'tcx>,
243 substs: &'tcx Substs<'tcx>,
244 args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
245 -> MirConstContext<'a, 'tcx> {
246 let mut context = MirConstContext {
250 locals: (0..mir.local_decls.len()).map(|_| None).collect(),
252 for (i, arg) in args.into_iter().enumerate() {
253 // Locals after local 0 are the function arguments
254 let index = mir::Local::new(i + 1);
255 context.locals[index] = Some(arg);
// Resolve `def_id`+`substs` to a monomorphic instance (Reveal::All, empty
// param env), fetch its MIR, and evaluate it with the given arguments.
260 fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
262 substs: &'tcx Substs<'tcx>,
263 args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
264 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
265 let instance = ty::Instance::resolve(ccx.tcx(),
266 ty::ParamEnv::empty(traits::Reveal::All),
269 let mir = ccx.tcx().instance_mir(instance.def);
270 MirConstContext::new(ccx, &mir, instance.substs, args).trans()
// Apply this context's substitutions to `value` and normalize associated
// types, producing a fully monomorphic copy.
273 fn monomorphize<T>(&self, value: &T) -> T
274 where T: TransNormalize<'tcx>
276 self.ccx.tcx().trans_apply_param_substs(self.substs, value)
// Interpret the constant's MIR: walk basic blocks from START_BLOCK, evaluate
// every Assign statement, and follow Goto/Drop/Assert/Call terminators until
// Return yields the value stored in the return pointer local.
279 fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
280 let tcx = self.ccx.tcx();
281 let mut bb = mir::START_BLOCK;
283 // Make sure to evaluate all statements to
284 // report as many errors as we possibly can.
285 let mut failure = Ok(());
288 let data = &self.mir[bb];
289 for statement in &data.statements {
290 let span = statement.source_info.span;
291 match statement.kind {
292 mir::StatementKind::Assign(ref dest, ref rvalue) => {
293 let ty = dest.ty(self.mir, tcx);
294 let ty = self.monomorphize(&ty).to_ty(tcx);
295 let value = self.const_rvalue(rvalue, ty, span);
296 add_err(&mut failure, &value);
297 self.store(dest, value, span);
// Storage/validation bookkeeping statements are no-ops for const eval.
299 mir::StatementKind::StorageLive(_) |
300 mir::StatementKind::StorageDead(_) |
301 mir::StatementKind::Validate(..) |
302 mir::StatementKind::EndRegion(_) |
303 mir::StatementKind::Nop => {}
304 mir::StatementKind::InlineAsm { .. } |
305 mir::StatementKind::SetDiscriminant{ .. } => {
306 span_bug!(span, "{:?} should not appear in constants?", statement.kind);
311 let terminator = data.terminator();
312 let span = terminator.source_info.span;
313 bb = match terminator.kind {
314 mir::TerminatorKind::Drop { target, .. } | // No dropping.
315 mir::TerminatorKind::Goto { target } => target,
316 mir::TerminatorKind::Return => {
318 return self.locals[mir::RETURN_POINTER].clone().unwrap_or_else(|| {
319 span_bug!(span, "no returned value in constant");
// Failed asserts become reported const-eval errors (bounds check / math),
// mirroring what the runtime panic would have been.
323 mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
324 let cond = self.const_operand(cond, span)?;
325 let cond_bool = common::const_to_uint(cond.llval) != 0;
326 if cond_bool != expected {
327 let err = match *msg {
328 mir::AssertMessage::BoundsCheck { ref len, ref index } => {
329 let len = self.const_operand(len, span)?;
330 let index = self.const_operand(index, span)?;
331 ErrKind::IndexOutOfBounds {
332 len: common::const_to_uint(len.llval),
333 index: common::const_to_uint(index.llval)
336 mir::AssertMessage::Math(ref err) => {
337 ErrKind::Math(err.clone())
339 mir::AssertMessage::GeneratorResumedAfterReturn |
340 mir::AssertMessage::GeneratorResumedAfterPanic =>
341 span_bug!(span, "{:?} should not appear in constants?", msg),
344 let err = ConstEvalErr { span: span, kind: err };
345 err.report(tcx, span, "expression");
// Calls: the `size_of`/`min_align_of` intrinsics are answered directly;
// any other callee is recursively evaluated via trans_def.
351 mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
352 let fn_ty = func.ty(self.mir, tcx);
353 let fn_ty = self.monomorphize(&fn_ty);
354 let (def_id, substs) = match fn_ty.sty {
355 ty::TyFnDef(def_id, substs) => (def_id, substs),
356 _ => span_bug!(span, "calling {:?} (of type {}) in constant",
360 let mut arg_vals = IndexVec::with_capacity(args.len());
362 let arg_val = self.const_operand(arg, span);
363 add_err(&mut failure, &arg_val);
364 arg_vals.push(arg_val);
366 if let Some((ref dest, target)) = *destination {
367 let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic {
// NOTE(review): the intrinsic-name match arms ("size_of"/"min_align_of"
// presumably) are elided between lines 368 and 370 — confirm in full file.
368 match &tcx.item_name(def_id)[..] {
370 let llval = C_usize(self.ccx,
371 self.ccx.size_of(substs.type_at(0)));
372 Ok(Const::new(llval, tcx.types.usize))
375 let llval = C_usize(self.ccx,
376 self.ccx.align_of(substs.type_at(0)) as u64);
377 Ok(Const::new(llval, tcx.types.usize))
379 _ => span_bug!(span, "{:?} in constant", terminator.kind)
382 MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals)
384 add_err(&mut failure, &result);
385 self.store(dest, result, span);
388 span_bug!(span, "diverging {:?} in constant", terminator.kind);
391 _ => span_bug!(span, "{:?} in constant", terminator.kind)
// NOTE(review): this is the tail of `fn store(&mut self, ...)` — the opening
// signature line is elided in this excerpt. Constants may only assign to
// plain locals; any other destination is a compiler bug.
397 dest: &mir::Lvalue<'tcx>,
398 value: Result<Const<'tcx>, ConstEvalErr<'tcx>>,
400 if let mir::Lvalue::Local(index) = *dest {
401 self.locals[index] = Some(value);
403 span_bug!(span, "assignment to {:?} in constant", dest);
// Evaluate an lvalue to a `ConstLvalue`: locals are looked up directly,
// statics become Base::Static, and projections (deref/field/index) are
// computed recursively on the base lvalue.
407 fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
408 -> Result<ConstLvalue<'tcx>, ConstEvalErr<'tcx>> {
409 let tcx = self.ccx.tcx();
411 if let mir::Lvalue::Local(index) = *lvalue {
412 return self.locals[index].clone().unwrap_or_else(|| {
413 span_bug!(span, "{:?} not initialized", lvalue)
414 }).map(|v| v.as_lvalue());
417 let lvalue = match *lvalue {
418 mir::Lvalue::Local(_) => bug!(), // handled above
419 mir::Lvalue::Static(box mir::Static { def_id, ty }) => {
421 base: Base::Static(consts::get_static(self.ccx, def_id)),
422 llextra: ptr::null_mut(),
423 ty: self.monomorphize(&ty),
426 mir::Lvalue::Projection(ref projection) => {
427 let tr_base = self.const_lvalue(&projection.base, span)?;
428 let projected_ty = LvalueTy::Ty { ty: tr_base.ty }
429 .projection_ty(tcx, &projection.elem);
430 let base = tr_base.to_const(span);
431 let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
432 let has_metadata = self.ccx.shared().type_has_metadata(projected_ty);
434 let (projected, llextra) = match projection.elem {
// Deref: classify the pointee (static / str / plain global) and read the
// global's initializer; bools stored as i8 are truncated back to i1.
435 mir::ProjectionElem::Deref => {
436 let (base, extra) = if !has_metadata {
437 (base.llval, ptr::null_mut())
441 if self.ccx.statics().borrow().contains_key(&base) {
442 (Base::Static(base), extra)
443 } else if let ty::TyStr = projected_ty.sty {
444 (Base::Str(base), extra)
447 let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
448 let mut val = unsafe { llvm::LLVMGetInitializer(v) };
450 span_bug!(span, "dereference of non-constant pointer `{:?}`",
453 if projected_ty.is_bool() {
454 let i1_type = Type::i1(self.ccx);
455 if val_ty(val) != i1_type {
457 val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
461 (Base::Value(val), extra)
464 mir::ProjectionElem::Field(ref field, _) => {
465 let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval,
467 let llextra = if !has_metadata {
472 (Base::Value(llprojected), llextra)
474 mir::ProjectionElem::Index(index) => {
475 let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
476 let llindex = self.const_operand(index, span)?.llval;
478 let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) {
481 span_bug!(span, "index is not an integer-constant expression")
484 // Produce an undef instead of a LLVM assertion on OOB.
485 let len = common::const_to_uint(tr_base.len(self.ccx));
486 let llelem = if iv < len as u128 {
487 const_get_elt(base.llval, &[iv as u32])
489 C_undef(type_of::type_of(self.ccx, projected_ty))
492 (Base::Value(llelem), ptr::null_mut())
494 _ => span_bug!(span, "{:?} in constant", projection.elem)
// Evaluate a MIR operand: Consume reads an lvalue; Constant either recurses
// into promoted MIR, re-dispatches an unevaluated constant through trans_def,
// or lowers the literal value directly via Const::from_constval.
506 fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
507 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
508 debug!("const_operand({:?} @ {:?})", operand, span);
509 let result = match *operand {
510 mir::Operand::Consume(ref lvalue) => {
511 Ok(self.const_lvalue(lvalue, span)?.to_const(span))
514 mir::Operand::Constant(ref constant) => {
515 let ty = self.monomorphize(&constant.ty);
516 match constant.literal.clone() {
517 mir::Literal::Promoted { index } => {
518 let mir = &self.mir.promoted[index];
519 MirConstContext::new(self.ccx, mir, self.substs, IndexVec::new()).trans()
521 mir::Literal::Value { value } => {
522 if let ConstVal::Unevaluated(def_id, substs) = value.val {
523 let substs = self.monomorphize(&substs);
524 MirConstContext::trans_def(self.ccx, def_id, substs, IndexVec::new())
526 Ok(Const::from_constval(self.ccx, &value.val, ty))
532 debug!("const_operand({:?} @ {:?}) = {:?}", operand, span,
533 result.as_ref().ok());
// Build an array constant. If every element has the uniform element LLVM
// type, emit a true LLVM array; otherwise (e.g. enum variants with distinct
// LLVM struct types) fall back to an anonymous struct.
537 fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef])
540 let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
541 bug!("bad array type {:?}", array_ty)
543 let llunitty = type_of::type_of(self.ccx, elem_ty);
544 // If the array contains enums, an LLVM array won't work.
545 let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
546 C_array(llunitty, fields)
548 C_struct(self.ccx, fields, false)
550 Const::new(val, array_ty)
// The core rvalue evaluator: lowers each MIR rvalue kind (Use, Repeat,
// Aggregate, Cast, Ref, Len, BinaryOp, CheckedBinaryOp, UnaryOp, SizeOf)
// to an LLVM constant of type `dest_ty`.
553 fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
554 dest_ty: Ty<'tcx>, span: Span)
555 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
556 let tcx = self.ccx.tcx();
557 debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span);
558 let val = match *rvalue {
559 mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
// [elem; N] — evaluate once and replicate the LLVM value N times.
561 mir::Rvalue::Repeat(ref elem, count) => {
562 let elem = self.const_operand(elem, span)?;
563 let size = count.as_u64();
564 assert_eq!(size as usize as u64, size);
565 let fields = vec![elem.llval; size as usize];
566 self.const_array(dest_ty, &fields)
569 mir::Rvalue::Aggregate(ref kind, ref operands) => {
570 // Make sure to evaluate all operands to
571 // report as many errors as we possibly can.
572 let mut fields = Vec::with_capacity(operands.len());
573 let mut failure = Ok(());
574 for operand in operands {
575 match self.const_operand(operand, span) {
576 Ok(val) => fields.push(val.llval),
577 Err(err) => if failure.is_ok() { failure = Err(err); }
583 mir::AggregateKind::Array(_) => {
584 self.const_array(dest_ty, &fields)
586 mir::AggregateKind::Adt(..) |
587 mir::AggregateKind::Closure(..) |
588 mir::AggregateKind::Generator(..) |
589 mir::AggregateKind::Tuple => {
590 Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty)
595 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
596 let operand = self.const_operand(source, span)?;
597 let cast_ty = self.monomorphize(&cast_ty);
599 let val = match *kind {
// fn item -> fn pointer: resolve the monomorphic fn address.
600 mir::CastKind::ReifyFnPointer => {
601 match operand.ty.sty {
602 ty::TyFnDef(def_id, substs) => {
603 callee::resolve_and_get_fn(self.ccx, def_id, substs)
606 span_bug!(span, "{} cannot be reified to a fn ptr",
611 mir::CastKind::ClosureFnPointer => {
612 match operand.ty.sty {
613 ty::TyClosure(def_id, substs) => {
614 // Get the def_id for FnOnce::call_once
615 let fn_once = tcx.lang_items().fn_once_trait().unwrap();
617 .global_tcx().associated_items(fn_once)
618 .find(|it| it.kind == ty::AssociatedKind::Method)
620 // Now create its substs [Closure, Tuple]
621 let input = tcx.fn_sig(def_id)
622 .subst(tcx, substs.substs).input(0);
623 let input = tcx.erase_late_bound_regions_and_normalize(&input);
624 let substs = tcx.mk_substs([operand.ty, input]
625 .iter().cloned().map(Kind::from));
626 callee::resolve_and_get_fn(self.ccx, call_once, substs)
629 bug!("{} cannot be cast to a fn ptr", operand.ty)
633 mir::CastKind::UnsafeFnPointer => {
634 // this is a no-op at the LLVM level
637 mir::CastKind::Unsize => {
638 // unsize targets other than to a fat pointer currently
639 // can't be in constants.
640 assert!(common::type_is_fat_ptr(self.ccx, cast_ty));
642 let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
643 .expect("consts: unsizing got non-pointer type").ty;
644 let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
645 // Normally, the source is a thin pointer and we are
646 // adding extra info to make a fat pointer. The exception
647 // is when we are upcasting an existing object fat pointer
648 // to use a different vtable. In that case, we want to
649 // load out the original data pointer so we can repackage
651 let (base, extra) = operand.get_fat_ptr();
654 (operand.llval, None)
657 let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference)
658 .expect("consts: unsizing got non-pointer target type").ty;
659 let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to();
660 let base = consts::ptrcast(base, ptr_ty);
661 let info = base::unsized_info(self.ccx, pointee_ty,
662 unsized_ty, old_info);
// Remember the thin->fat mapping so a later Deref can find the
// original sized initializer (see const_lvalue's const_unsized lookup).
664 if old_info.is_none() {
665 let prev_const = self.ccx.const_unsized().borrow_mut()
666 .insert(base, operand.llval);
667 assert!(prev_const.is_none() || prev_const == Some(operand.llval));
669 assert_eq!(abi::FAT_PTR_ADDR, 0);
670 assert_eq!(abi::FAT_PTR_EXTRA, 1);
671 C_struct(self.ccx, &[base, info], false)
// Scalar casts between immediates: dispatch on (input, output) CastTy.
673 mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
674 debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
675 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
676 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
677 let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty);
678 let llval = operand.llval;
679 let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
680 let l = self.ccx.layout_of(operand.ty);
681 adt::is_discr_signed(&l)
683 operand.ty.is_signed()
687 match (r_t_in, r_t_out) {
688 (CastTy::Int(_), CastTy::Int(_)) => {
689 let s = signed as llvm::Bool;
690 llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
692 (CastTy::Int(_), CastTy::Float) => {
693 cast_const_int_to_float(self.ccx, llval, signed, ll_t_out)
695 (CastTy::Float, CastTy::Float) => {
696 llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
698 (CastTy::Float, CastTy::Int(IntTy::I)) => {
699 cast_const_float_to_int(self.ccx, &operand,
700 true, ll_t_out, span)
702 (CastTy::Float, CastTy::Int(_)) => {
703 cast_const_float_to_int(self.ccx, &operand,
704 false, ll_t_out, span)
706 (CastTy::Ptr(_), CastTy::Ptr(_)) |
707 (CastTy::FnPtr, CastTy::Ptr(_)) |
708 (CastTy::RPtr(_), CastTy::Ptr(_)) => {
709 consts::ptrcast(llval, ll_t_out)
711 (CastTy::Int(_), CastTy::Ptr(_)) => {
712 llvm::LLVMConstIntToPtr(llval, ll_t_out.to_ref())
714 (CastTy::Ptr(_), CastTy::Int(_)) |
715 (CastTy::FnPtr, CastTy::Int(_)) => {
716 llvm::LLVMConstPtrToInt(llval, ll_t_out.to_ref())
718 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
722 mir::CastKind::Misc => { // Casts from a fat-ptr.
723 let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty);
724 let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty);
725 if common::type_is_fat_ptr(self.ccx, operand.ty) {
726 let (data_ptr, meta_ptr) = operand.get_fat_ptr();
727 if common::type_is_fat_ptr(self.ccx, cast_ty) {
728 let ll_cft = ll_cast_ty.field_types();
729 let ll_fft = ll_from_ty.field_types();
730 let data_cast = consts::ptrcast(data_ptr, ll_cft[0]);
731 assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
732 C_struct(self.ccx, &[data_cast, meta_ptr], false)
733 } else { // cast to thin-ptr
734 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
735 // pointer-cast of that pointer to desired pointer type.
736 consts::ptrcast(data_ptr, ll_cast_ty)
739 bug!("Unexpected non-fat-pointer operand")
743 Const::new(val, cast_ty)
// &lvalue: spill by-value bases to an (im)mutable global and take its
// address; unsized lvalues become a (ptr, extra) fat-pointer struct.
746 mir::Rvalue::Ref(_, bk, ref lvalue) => {
747 let tr_lvalue = self.const_lvalue(lvalue, span)?;
749 let ty = tr_lvalue.ty;
750 let ref_ty = tcx.mk_ref(tcx.types.re_erased,
751 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
753 let base = match tr_lvalue.base {
754 Base::Value(llval) => {
755 // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
756 let align = if self.ccx.shared().type_is_sized(ty) {
757 self.ccx.align_of(ty)
759 self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
761 if bk == mir::BorrowKind::Mut {
762 consts::addr_of_mut(self.ccx, llval, align, "ref_mut")
764 consts::addr_of(self.ccx, llval, align, "ref")
768 Base::Static(llval) => llval
771 let ptr = if self.ccx.shared().type_is_sized(ty) {
774 C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
776 Const::new(ptr, ref_ty)
779 mir::Rvalue::Len(ref lvalue) => {
780 let tr_lvalue = self.const_lvalue(lvalue, span)?;
781 Const::new(tr_lvalue.len(self.ccx), tcx.types.usize)
784 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
785 let lhs = self.const_operand(lhs, span)?;
786 let rhs = self.const_operand(rhs, span)?;
788 let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
789 let (lhs, rhs) = (lhs.llval, rhs.llval);
790 Const::new(const_scalar_binop(op, lhs, rhs, ty), binop_ty)
// Checked ops produce a (value, overflowed) pair; floats never take this
// path (hence the assert).
793 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
794 let lhs = self.const_operand(lhs, span)?;
795 let rhs = self.const_operand(rhs, span)?;
797 let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
798 let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
799 let (lhs, rhs) = (lhs.llval, rhs.llval);
800 assert!(!ty.is_fp());
802 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
803 Some((llval, of)) => {
804 let llof = C_bool(self.ccx, of);
805 Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty)
808 span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}",
809 rvalue, Value(lhs), Value(rhs));
814 mir::Rvalue::UnaryOp(op, ref operand) => {
815 let operand = self.const_operand(operand, span)?;
816 let lloperand = operand.llval;
817 let llval = match op {
820 llvm::LLVMConstNot(lloperand)
824 let is_float = operand.ty.is_fp();
827 llvm::LLVMConstFNeg(lloperand)
829 llvm::LLVMConstNeg(lloperand)
834 Const::new(llval, operand.ty)
837 mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
838 assert!(self.ccx.shared().type_is_sized(ty));
839 let llval = C_usize(self.ccx, self.ccx.size_of(ty));
840 Const::new(llval, tcx.types.usize)
843 _ => span_bug!(span, "{:?} in constant", rvalue)
846 debug!("const_rvalue({:?}: {:?} @ {:?}) = {:?}", rvalue, dest_ty, span, val);
// Convert an LLVM integer constant back into a target-aware `ConstInt`,
// choosing signed/unsigned extraction by the Rust type; returns None for
// non-integer types (match tail elided in this excerpt).
853 fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
855 ty::TyInt(int_type) => const_to_opt_u128(value, true)
856 .and_then(|input| ConstInt::new_signed(input as i128, int_type,
857 tcx.sess.target.isize_ty)),
858 ty::TyUint(uint_type) => const_to_opt_u128(value, false)
859 .and_then(|input| ConstInt::new_unsigned(input, uint_type,
860 tcx.sess.target.usize_ty)),
// Lower a scalar MIR binary op to the corresponding LLVM constant-expression
// builder, dispatching on float vs. signed vs. unsigned integer semantics.
// Shift RHS operands are first width-adjusted via cast_shift_const_rhs.
866 pub fn const_scalar_binop(op: mir::BinOp,
869 input_ty: Ty) -> ValueRef {
870 assert!(!input_ty.is_simd());
871 let is_float = input_ty.is_fp();
872 let signed = input_ty.is_signed();
876 mir::BinOp::Add if is_float => llvm::LLVMConstFAdd(lhs, rhs),
877 mir::BinOp::Add => llvm::LLVMConstAdd(lhs, rhs),
879 mir::BinOp::Sub if is_float => llvm::LLVMConstFSub(lhs, rhs),
880 mir::BinOp::Sub => llvm::LLVMConstSub(lhs, rhs),
882 mir::BinOp::Mul if is_float => llvm::LLVMConstFMul(lhs, rhs),
883 mir::BinOp::Mul => llvm::LLVMConstMul(lhs, rhs),
885 mir::BinOp::Div if is_float => llvm::LLVMConstFDiv(lhs, rhs),
886 mir::BinOp::Div if signed => llvm::LLVMConstSDiv(lhs, rhs),
887 mir::BinOp::Div => llvm::LLVMConstUDiv(lhs, rhs),
889 mir::BinOp::Rem if is_float => llvm::LLVMConstFRem(lhs, rhs),
890 mir::BinOp::Rem if signed => llvm::LLVMConstSRem(lhs, rhs),
891 mir::BinOp::Rem => llvm::LLVMConstURem(lhs, rhs),
893 mir::BinOp::BitXor => llvm::LLVMConstXor(lhs, rhs),
894 mir::BinOp::BitAnd => llvm::LLVMConstAnd(lhs, rhs),
895 mir::BinOp::BitOr => llvm::LLVMConstOr(lhs, rhs),
897 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
898 llvm::LLVMConstShl(lhs, rhs)
901 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
// Arithmetic shift for signed types preserves the sign bit.
902 if signed { llvm::LLVMConstAShr(lhs, rhs) }
903 else { llvm::LLVMConstLShr(lhs, rhs) }
905 mir::BinOp::Eq | mir::BinOp::Ne |
906 mir::BinOp::Lt | mir::BinOp::Le |
907 mir::BinOp::Gt | mir::BinOp::Ge => {
909 let cmp = base::bin_op_to_fcmp_predicate(op.to_hir_binop());
910 llvm::LLVMConstFCmp(cmp, lhs, rhs)
912 let cmp = base::bin_op_to_icmp_predicate(op.to_hir_binop(),
914 llvm::LLVMConstICmp(cmp, lhs, rhs)
917 mir::BinOp::Offset => unreachable!("BinOp::Offset in const-eval!")
// Checked variant: re-run the operation on `ConstInt` (which reports
// overflow), then pair the unchecked LLVM result with the overflow flag.
// Returns None if either operand is not an integer constant.
922 pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
927 -> Option<(ValueRef, bool)> {
928 if let (Some(lhs), Some(rhs)) = (to_const_int(lllhs, input_ty, tcx),
929 to_const_int(llrhs, input_ty, tcx)) {
930 let result = match op {
931 mir::BinOp::Add => lhs + rhs,
932 mir::BinOp::Sub => lhs - rhs,
933 mir::BinOp::Mul => lhs * rhs,
934 mir::BinOp::Shl => lhs << rhs,
935 mir::BinOp::Shr => lhs >> rhs,
937 bug!("Operator `{:?}` is not a checkable operator", op)
// Overflow/ShiftNegative map to `of = true`; any other math error is a bug.
941 let of = match result {
943 Err(ConstMathErr::Overflow(_)) |
944 Err(ConstMathErr::ShiftNegative) => true,
946 bug!("Operator `{:?}` on `{:?}` and `{:?}` errored: {}",
947 op, lhs, rhs, err.description());
951 Some((const_scalar_binop(op, lllhs, llrhs, input_ty), of))
// Float -> int cast done in software via rustc_apfloat so the result matches
// HIR const eval exactly (including overflow/NaN detection), instead of
// trusting LLVM's constant folder. Reports CannotCast on INVALID_OP.
957 unsafe fn cast_const_float_to_int(ccx: &CrateContext,
961 span: Span) -> ValueRef {
962 let llval = operand.llval;
963 let float_bits = match operand.ty.sty {
964 ty::TyFloat(fty) => fty.bit_width(),
965 _ => bug!("cast_const_float_to_int: operand not a float"),
967 // Note: this breaks if llval is a complex constant expression rather than a simple constant.
968 // One way that might happen would be if addresses could be turned into integers in constant
969 // expressions, but that doesn't appear to be possible?
970 // In any case, an ICE is better than producing undef.
971 let llval_bits = consts::bitcast(llval, Type::ix(ccx, float_bits as u64));
972 let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
973 panic!("could not get bits of constant float {:?}",
976 let int_width = int_ty.int_width() as usize;
977 // Try to convert, but report an error for overflow and NaN. This matches HIR const eval.
978 let cast_result = match float_bits {
979 32 if signed => ieee::Single::from_bits(bits).to_i128(int_width).map(|v| v as u128),
980 64 if signed => ieee::Double::from_bits(bits).to_i128(int_width).map(|v| v as u128),
981 32 => ieee::Single::from_bits(bits).to_u128(int_width),
982 64 => ieee::Double::from_bits(bits).to_u128(int_width),
983 n => bug!("unsupported float width {}", n),
985 if cast_result.status.contains(Status::INVALID_OP) {
986 let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
987 err.report(ccx.tcx(), span, "expression");
989 C_big_integral(int_ty, cast_result.value)
// Int -> float cast. Signed values use SIToFP; unsigned values use UIToFP,
// except that u128 values beyond f32::MAX + 0.5 ULP are forced to +inf to
// work around LLVM rounding such casts incorrectly (see MAX_F32_PLUS_HALF_ULP).
992 unsafe fn cast_const_int_to_float(ccx: &CrateContext,
995 float_ty: Type) -> ValueRef {
996 // Note: this breaks if llval is a complex constant expression rather than a simple constant.
997 // One way that might happen would be if addresses could be turned into integers in constant
998 // expressions, but that doesn't appear to be possible?
999 // In any case, an ICE is better than producing undef.
1000 let value = const_to_opt_u128(llval, signed).unwrap_or_else(|| {
1001 panic!("could not get z128 value of constant integer {:?}",
1005 llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
1006 } else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
1007 // We're casting to f32 and the value is > f32::MAX + 0.5 ULP -> round up to infinity.
1008 let infinity_bits = C_u32(ccx, ieee::Single::INFINITY.to_bits() as u32);
1009 consts::bitcast(infinity_bits, float_ty)
1011 llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
1015 impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Entry point used during normal MIR translation: evaluate a mir::Constant
// (promoted MIR, unevaluated def, or literal value). On error the error has
// already been reported, so an undef of the right type is returned to let
// codegen continue.
1016 pub fn trans_constant(&mut self,
1017 bcx: &Builder<'a, 'tcx>,
1018 constant: &mir::Constant<'tcx>)
1021 debug!("trans_constant({:?})", constant);
1022 let ty = self.monomorphize(&constant.ty);
1023 let result = match constant.literal.clone() {
1024 mir::Literal::Promoted { index } => {
1025 let mir = &self.mir.promoted[index];
1026 MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans()
1028 mir::Literal::Value { value } => {
1029 if let ConstVal::Unevaluated(def_id, substs) = value.val {
1030 let substs = self.monomorphize(&substs);
1031 MirConstContext::trans_def(bcx.ccx, def_id, substs, IndexVec::new())
1033 Ok(Const::from_constval(bcx.ccx, &value.val, ty))
1038 let result = result.unwrap_or_else(|_| {
1039 // We've errored, so we don't have to produce working code.
1040 let llty = type_of::type_of(bcx.ccx, ty);
1041 Const::new(C_undef(llty), ty)
1044 debug!("trans_constant({:?}) = {:?}", constant, result);
// Evaluate a static's initializer body (no generics, hence Substs::empty)
// and return the raw LLVM constant for use as the global's initializer.
1050 pub fn trans_static_initializer<'a, 'tcx>(
1051 ccx: &CrateContext<'a, 'tcx>,
1053 -> Result<ValueRef, ConstEvalErr<'tcx>>
1055 MirConstContext::trans_def(ccx, def_id, Substs::empty(), IndexVec::new())
1059 /// Construct a constant value, suitable for initializing a
1060 /// GlobalVariable, given a case and constant values for its fields.
1061 /// Note that this may have a different LLVM type (and different
1062 /// alignment!) from the representation's `type_of`, so it needs a
1063 /// pointer cast before use.
1065 /// The LLVM type system does not directly support unions, and only
1066 /// pointers can be bitcast, so a constant (and, by extension, the
1067 /// GlobalVariable initialized by it) will have a type that can vary
1068 /// depending on which case of an enum it is.
1070 /// To understand the alignment situation, consider `enum E { V64(u64),
1071 /// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
1072 /// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
1073 /// i32, i32}`, which is 4-byte aligned.
1075 /// Currently the returned value has the same size as the type, but
1076 /// this could be changed in the future to avoid allocating unnecessary
1077 /// space after values of shorter-than-maximum cases.
1078 fn trans_const<'a, 'tcx>(
1079 ccx: &CrateContext<'a, 'tcx>,
1081 kind: &mir::AggregateKind,
1084 let l = ccx.layout_of(t);
1085 let variant_index = match *kind {
1086 mir::AggregateKind::Adt(_, index, _, _) => index,
// Dispatch on the chosen layout representation of the aggregate type.
1090 layout::CEnum { discr: d, min, max, .. } => {
1091 let discr = match *kind {
1092 mir::AggregateKind::Adt(adt_def, _, _, _) => {
1093 adt_def.discriminant_for_variant(ccx.tcx(), variant_index)
1094 .to_u128_unchecked() as u64
1098 assert_eq!(vals.len(), 0);
1099 adt::assert_discr_in_range(min, max, discr);
1100 C_int(Type::from_integer(ccx, d), discr as i64)
// Tagged enum: discriminant first, then the variant fields, padded out to
// the full enum size so all variants have equal-sized initializers.
1102 layout::General { discr: d, ref variants, .. } => {
1103 let variant = &variants[variant_index];
1104 let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64);
1105 let mut vals_with_discr = vec![lldiscr];
1106 vals_with_discr.extend_from_slice(vals);
1107 let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]);
1108 let needed_padding = l.size(ccx).bytes() - variant.stride().bytes();
1109 if needed_padding > 0 {
1110 contents.push(padding(ccx, needed_padding));
1112 C_struct(ccx, &contents[..], false)
1114 layout::UntaggedUnion { ref variants, .. }=> {
1115 assert_eq!(variant_index, 0);
1116 let contents = build_const_union(ccx, variants, vals[0]);
1117 C_struct(ccx, &contents, variants.packed)
1119 layout::Univariant { ref variant, .. } => {
1120 assert_eq!(variant_index, 0);
1121 let contents = build_const_struct(ccx, &variant, vals);
1122 C_struct(ccx, &contents[..], variant.packed)
1124 layout::Vector { .. } => {
// Nullable-pointer optimizations: the null value itself encodes the
// non-data variant; see #8506 for why null is always used below.
1127 layout::RawNullablePointer { nndiscr, .. } => {
1128 if variant_index as u64 == nndiscr {
1129 assert_eq!(vals.len(), 1);
1132 C_null(type_of::type_of(ccx, t))
1135 layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
1136 if variant_index as u64 == nndiscr {
1137 C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
1139 // Always use null even if it's not the `discrfield`th
1140 // field; see #8506.
1141 C_null(type_of::type_of(ccx, t))
// NOTE(review): "repreented" in the message below is a typo for "represented";
// left as-is here because it is a runtime string, not a comment.
1144 _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l)
1148 /// Building structs is a little complicated, because we might need to
1149 /// insert padding if a field's value is less aligned than its type.
1151 /// Continuing the example from `trans_const`, a value of type `(u32,
1152 /// E)` should have the `E` at offset 8, but if that field's
1153 /// initializer is 4-byte aligned then simply translating the tuple as
1154 /// a two-element struct will locate it at offset 4, and accesses to it
1155 /// will read the wrong memory.
1156 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1157 st: &layout::Struct,
1160 assert_eq!(vals.len(), st.offsets.len());
1162 if vals.len() == 0 {
1166 // offset of current value
1168 let mut cfields = Vec::new();
// Reserve room for a value plus a potential padding entry per field.
1169 cfields.reserve(st.offsets.len()*2);
// Walk fields in memory order, inserting undef padding whenever the running
// offset falls short of the field's layout-assigned offset.
1171 let parts = st.field_index_by_increasing_offset().map(|i| {
1172 (&vals[i], st.offsets[i].bytes())
1174 for (&val, target_offset) in parts {
1175 if offset < target_offset {
1176 cfields.push(padding(ccx, target_offset - offset));
1177 offset = target_offset;
1179 assert!(!is_undef(val));
1181 offset += machine::llsize_of_alloc(ccx, val_ty(val));
// Trailing padding so the struct fills its full stride.
1184 if offset < st.stride().bytes() {
1185 cfields.push(padding(ccx, st.stride().bytes() - offset));
// A union constant is the active field's value followed by undef padding up
// to the union's full stride.
1191 fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1193 field_val: ValueRef)
1195 let mut cfields = vec![field_val];
1197 let offset = machine::llsize_of_alloc(ccx, val_ty(field_val));
1198 let size = un.stride().bytes();
1200 cfields.push(padding(ccx, size - offset));
// `size` bytes of undef filler, modeled as an undef [size x i8] array.
1206 fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
1207 C_undef(Type::array(&Type::i8(ccx), size))