1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind};
13 use rustc_const_math::{ConstInt, ConstMathErr, MAX_F32_PLUS_HALF_ULP};
14 use rustc::hir::def_id::DefId;
15 use rustc::infer::TransNormalize;
18 use rustc::mir::interpret::{Value as MiriValue, PrimVal};
19 use rustc::mir::tcx::PlaceTy;
20 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
21 use rustc::ty::layout::{self, LayoutOf, Size};
22 use rustc::ty::cast::{CastTy, IntTy};
23 use rustc::ty::subst::{Kind, Substs};
24 use rustc_apfloat::{ieee, Float, Status};
25 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
30 use common::{self, CodegenCx, const_get_elt, val_ty};
31 use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64};
32 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
33 use common::const_to_opt_u128;
35 use type_of::LayoutLlvmExt;
41 use syntax::symbol::Symbol;
46 use super::operand::{OperandRef, OperandValue};
47 use super::FunctionCx;
49 /// A sized constant rvalue.
50 /// The LLVM type might not be the same for a single Rust type,
51 /// e.g. each enum variant would have its own LLVM struct type.
52 #[derive(Copy, Clone)]
53 pub struct Const<'tcx> {
58 impl<'a, 'tcx> Const<'tcx> {
59 pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> {
66 pub fn from_bytes(ccx: &CrateContext<'a, 'tcx>, b: u128, ty: Ty<'tcx>) -> Const<'tcx> {
67 let llval = match ty.sty {
68 ty::TyInt(ast::IntTy::I128) |
69 ty::TyUint(ast::UintTy::U128) => C_uint_big(Type::i128(ccx), b),
70 ty::TyInt(i) => C_int(Type::int_from_ty(ccx, i), b as i128 as i64),
71 ty::TyUint(u) => C_uint(Type::uint_from_ty(ccx, u), b as u64),
77 assert_eq!(b as u32 as u128, b);
79 assert!(::std::char::from_u32(c).is_some());
80 C_uint(Type::char(ccx), c as u64)
83 let llty = ccx.layout_of(ty).llvm_type(ccx);
84 let bits = match fty {
85 ast::FloatTy::F32 => C_u32(ccx, b as u32),
86 ast::FloatTy::F64 => C_u64(ccx, b as u64),
88 consts::bitcast(bits, llty)
90 ty::TyAdt(adt, _) if adt.is_enum() => {
91 use rustc::ty::util::IntTypeExt;
92 Const::from_bytes(ccx, b, adt.repr.discr_type().to_ty(ccx.tcx())).llval
94 _ => bug!("from_bytes({}, {})", b, ty),
99 /// Translate ConstVal into an LLVM constant value.
100 pub fn from_constval(cx: &CodegenCx<'a, 'tcx>,
104 let llty = cx.layout_of(ty).llvm_type(cx);
105 trace!("from_constval: {:#?}: {}", cv, ty);
106 let val = match *cv {
107 ConstVal::Unevaluated(..) => unimplemented!("const val `{:?}`", cv),
108 ConstVal::Value(MiriValue::ByRef(..)) => unimplemented!("{:#?}:{}", cv, ty),
109 ConstVal::Value(MiriValue::ByValPair(PrimVal::Ptr(ptr), PrimVal::Bytes(len))) => {
111 ty::TyRef(_, ref tam) => match tam.ty.sty {
113 _ => unimplemented!("non-str fat pointer: {:?}: {:?}", ptr, ty),
115 _ => unimplemented!("non-str fat pointer: {:?}: {:?}", ptr, ty),
121 .get_alloc(ptr.alloc_id)
122 .expect("miri alloc not found");
123 assert_eq!(len as usize as u128, len);
124 let slice = &alloc.bytes[(ptr.offset as usize)..][..(len as usize)];
125 let s = ::std::str::from_utf8(slice)
126 .expect("non utf8 str from miri");
127 C_str_slice(ccx, Symbol::intern(s).as_str())
129 ConstVal::Value(MiriValue::ByValPair(..)) => unimplemented!(),
130 ConstVal::Value(MiriValue::ByVal(PrimVal::Bytes(b))) =>
131 return Const::from_bytes(ccx, b, ty),
132 ConstVal::Value(MiriValue::ByVal(PrimVal::Undef)) => C_undef(llty),
133 ConstVal::Value(MiriValue::ByVal(PrimVal::Ptr(ptr))) => {
138 .get_alloc(ptr.alloc_id)
139 .expect("miri alloc not found");
140 let data = &alloc.bytes[(ptr.offset as usize)..];
141 consts::addr_of(ccx, C_bytes(ccx, data), ccx.align_of(ty), "byte_str")
145 assert!(!ty.has_erasable_regions());
150 fn get_field(&self, cx: &CodegenCx<'a, 'tcx>, i: usize) -> ValueRef {
151 let layout = cx.layout_of(self.ty);
152 let field = layout.field(cx, i);
154 return C_undef(field.immediate_llvm_type(cx));
156 let offset = layout.fields.offset(i);
158 layout::Abi::Scalar(_) |
159 layout::Abi::ScalarPair(..) |
160 layout::Abi::Vector { .. }
161 if offset.bytes() == 0 && field.size == layout.size => self.llval,
163 layout::Abi::ScalarPair(ref a, ref b) => {
164 if offset.bytes() == 0 {
165 assert_eq!(field.size, a.value.size(cx));
166 const_get_elt(self.llval, 0)
168 assert_eq!(offset, a.value.size(cx)
169 .abi_align(b.value.align(cx)));
170 assert_eq!(field.size, b.value.size(cx));
171 const_get_elt(self.llval, 1)
175 match layout.fields {
176 layout::FieldPlacement::Union(_) => self.llval,
177 _ => const_get_elt(self.llval, layout.llvm_field_index(i)),
183 fn get_pair(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
184 (self.get_field(cx, 0), self.get_field(cx, 1))
187 fn get_fat_ptr(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
188 assert_eq!(abi::FAT_PTR_ADDR, 0);
189 assert_eq!(abi::FAT_PTR_EXTRA, 1);
193 fn as_place(&self) -> ConstPlace<'tcx> {
195 base: Base::Value(self.llval),
196 llextra: ptr::null_mut(),
201 pub fn to_operand(&self, cx: &CodegenCx<'a, 'tcx>) -> OperandRef<'tcx> {
202 let layout = cx.layout_of(self.ty);
203 let llty = layout.immediate_llvm_type(cx);
204 let llvalty = val_ty(self.llval);
206 let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
208 const_get_elt(self.llval, 0),
209 const_get_elt(self.llval, 1))
210 } else if llty == llvalty && layout.is_llvm_immediate() {
211 // If the types match, we can use the value directly.
212 OperandValue::Immediate(self.llval)
214 // Otherwise, or if the value is not immediate, we create
215 // a constant LLVM global and cast its address if necessary.
216 let align = cx.align_of(self.ty);
217 let ptr = consts::addr_of(cx, self.llval, align, "const");
218 OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(cx).ptr_to()),
229 impl<'tcx> fmt::Debug for Const<'tcx> {
230 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
231 write!(f, "Const({:?}: {:?})", Value(self.llval), self.ty)
235 #[derive(Copy, Clone)]
237 /// A constant value without a unique address.
240 /// String literal base pointer (cast from array).
243 /// The address of a static.
247 /// A place as seen from a constant.
248 #[derive(Copy, Clone)]
249 struct ConstPlace<'tcx> {
255 impl<'tcx> ConstPlace<'tcx> {
256 fn to_const(&self, span: Span) -> Const<'tcx> {
258 Base::Value(val) => Const::new(val, self.ty),
260 span_bug!(span, "loading from `str` ({:?}) in constant",
263 Base::Static(val) => {
264 span_bug!(span, "loading from `static` ({:?}) in constant",
270 pub fn len<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> ValueRef {
272 ty::TyArray(_, n) => {
273 C_usize(cx, n.val.unwrap_u64())
275 ty::TySlice(_) | ty::TyStr => {
276 assert!(self.llextra != ptr::null_mut());
279 _ => bug!("unexpected type `{}` in ConstPlace::len", self.ty)
284 /// Machinery for translating a constant's MIR to LLVM values.
285 /// FIXME(eddyb) use miri and lower its allocations to LLVM.
286 struct MirConstContext<'a, 'tcx: 'a> {
287 cx: &'a CodegenCx<'a, 'tcx>,
288 mir: &'a mir::Mir<'tcx>,
290 /// Type parameters for const fn and associated constants.
291 substs: &'tcx Substs<'tcx>,
293 /// Values of locals in a constant or const fn.
294 locals: IndexVec<mir::Local, Option<Result<Const<'tcx>, ConstEvalErr<'tcx>>>>
297 fn add_err<'tcx, U, V>(failure: &mut Result<U, ConstEvalErr<'tcx>>,
298 value: &Result<V, ConstEvalErr<'tcx>>)
300 if let &Err(ref err) = value {
302 *failure = Err(err.clone());
307 impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
308 fn new(cx: &'a CodegenCx<'a, 'tcx>,
309 mir: &'a mir::Mir<'tcx>,
310 substs: &'tcx Substs<'tcx>,
311 args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
312 -> MirConstContext<'a, 'tcx> {
313 let mut context = MirConstContext {
317 locals: (0..mir.local_decls.len()).map(|_| None).collect(),
319 for (i, arg) in args.into_iter().enumerate() {
320 // Locals after local 0 are the function arguments
321 let index = mir::Local::new(i + 1);
322 context.locals[index] = Some(arg);
327 fn trans_def(cx: &'a CodegenCx<'a, 'tcx>,
329 substs: &'tcx Substs<'tcx>,
330 args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
331 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
332 let instance = ty::Instance::resolve(cx.tcx,
333 ty::ParamEnv::empty(traits::Reveal::All),
336 let mir = cx.tcx.instance_mir(instance.def);
337 MirConstContext::new(cx, &mir, instance.substs, args).trans()
340 fn monomorphize<T>(&self, value: &T) -> T
341 where T: TransNormalize<'tcx>
343 self.cx.tcx.trans_apply_param_substs(self.substs, value)
346 fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
347 let tcx = self.cx.tcx;
348 let mut bb = mir::START_BLOCK;
350 // Make sure to evaluate all statements to
351 // report as many errors as we possibly can.
352 let mut failure = Ok(());
355 let data = &self.mir[bb];
356 for statement in &data.statements {
357 let span = statement.source_info.span;
358 match statement.kind {
359 mir::StatementKind::Assign(ref dest, ref rvalue) => {
360 let ty = dest.ty(self.mir, tcx);
361 let ty = self.monomorphize(&ty).to_ty(tcx);
362 let value = self.const_rvalue(rvalue, ty, span);
363 add_err(&mut failure, &value);
364 self.store(dest, value, span);
366 mir::StatementKind::StorageLive(_) |
367 mir::StatementKind::StorageDead(_) |
368 mir::StatementKind::Validate(..) |
369 mir::StatementKind::EndRegion(_) |
370 mir::StatementKind::Nop => {}
371 mir::StatementKind::InlineAsm { .. } |
372 mir::StatementKind::SetDiscriminant{ .. } => {
373 span_bug!(span, "{:?} should not appear in constants?", statement.kind);
378 let terminator = data.terminator();
379 let span = terminator.source_info.span;
380 bb = match terminator.kind {
381 mir::TerminatorKind::Drop { target, .. } | // No dropping.
382 mir::TerminatorKind::Goto { target } => target,
383 mir::TerminatorKind::Return => {
385 return self.locals[mir::RETURN_PLACE].clone().unwrap_or_else(|| {
386 span_bug!(span, "no returned value in constant");
390 mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
391 let cond = self.const_operand(cond, span)?;
392 let cond_bool = common::const_to_uint(cond.llval) != 0;
393 if cond_bool != expected {
394 let err = match *msg {
395 mir::AssertMessage::BoundsCheck { ref len, ref index } => {
396 let len = self.const_operand(len, span)?;
397 let index = self.const_operand(index, span)?;
398 ErrKind::IndexOutOfBounds {
399 len: common::const_to_uint(len.llval),
400 index: common::const_to_uint(index.llval)
403 mir::AssertMessage::Math(ref err) => {
404 ErrKind::Math(err.clone())
406 mir::AssertMessage::GeneratorResumedAfterReturn |
407 mir::AssertMessage::GeneratorResumedAfterPanic =>
408 span_bug!(span, "{:?} should not appear in constants?", msg),
411 let err = ConstEvalErr { span: span, kind: err };
412 err.report(tcx, span, "expression");
418 mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
419 let fn_ty = func.ty(self.mir, tcx);
420 let fn_ty = self.monomorphize(&fn_ty);
421 let (def_id, substs) = match fn_ty.sty {
422 ty::TyFnDef(def_id, substs) => (def_id, substs),
423 _ => span_bug!(span, "calling {:?} (of type {}) in constant",
426 trace!("trans const fn call {:?}, {:?}, {:#?}", func, fn_ty, args);
428 let mut arg_vals = IndexVec::with_capacity(args.len());
430 let arg_val = self.const_operand(arg, span);
431 add_err(&mut failure, &arg_val);
432 arg_vals.push(arg_val);
434 if let Some((ref dest, target)) = *destination {
435 let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic {
436 match &tcx.item_name(def_id)[..] {
438 let llval = C_usize(self.cx,
439 self.cx.size_of(substs.type_at(0)).bytes());
440 Ok(Const::new(llval, tcx.types.usize))
443 let llval = C_usize(self.cx,
444 self.cx.align_of(substs.type_at(0)).abi());
445 Ok(Const::new(llval, tcx.types.usize))
448 let llval = C_u64(self.cx,
449 self.cx.tcx.type_id_hash(substs.type_at(0)));
450 Ok(Const::new(llval, tcx.types.u64))
452 _ => span_bug!(span, "{:?} in constant", terminator.kind)
454 } else if let Some((op, is_checked)) = tcx.is_binop_lang_item(def_id) {
456 assert_eq!(arg_vals.len(), 2);
457 let rhs = arg_vals.pop().unwrap()?;
458 let lhs = arg_vals.pop().unwrap()?;
460 let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
461 let (lhs, rhs) = (lhs.llval, rhs.llval);
462 Ok(Const::new(const_scalar_binop(op, lhs, rhs, binop_ty),
466 let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
467 let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
468 let (lhs, rhs) = (lhs.llval, rhs.llval);
469 assert!(!ty.is_fp());
471 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
472 Some((llval, of)) => {
476 &mir::AggregateKind::Tuple,
478 Const::new(llval, val_ty),
479 Const::new(C_bool(self.cx, of), tcx.types.bool)
484 "{:?} got non-integer operands: {:?} and {:?}",
485 op, Value(lhs), Value(rhs));
491 MirConstContext::trans_def(self.cx, def_id, substs, arg_vals)
493 add_err(&mut failure, &result);
494 self.store(dest, result, span);
497 span_bug!(span, "diverging {:?} in constant", terminator.kind);
500 _ => span_bug!(span, "{:?} in constant", terminator.kind)
506 dest: &mir::Place<'tcx>,
507 value: Result<Const<'tcx>, ConstEvalErr<'tcx>>,
509 if let mir::Place::Local(index) = *dest {
510 self.locals[index] = Some(value);
512 span_bug!(span, "assignment to {:?} in constant", dest);
516 fn const_place(&self, place: &mir::Place<'tcx>, span: Span)
517 -> Result<ConstPlace<'tcx>, ConstEvalErr<'tcx>> {
518 let tcx = self.cx.tcx;
520 if let mir::Place::Local(index) = *place {
521 return self.locals[index].clone().unwrap_or_else(|| {
522 span_bug!(span, "{:?} not initialized", place)
523 }).map(|v| v.as_place());
526 let place = match *place {
527 mir::Place::Local(_) => bug!(), // handled above
528 mir::Place::Static(box mir::Static { def_id, ty }) => {
530 base: Base::Static(consts::get_static(self.cx, def_id)),
531 llextra: ptr::null_mut(),
532 ty: self.monomorphize(&ty),
535 mir::Place::Projection(ref projection) => {
536 let tr_base = self.const_place(&projection.base, span)?;
537 let projected_ty = PlaceTy::Ty { ty: tr_base.ty }
538 .projection_ty(tcx, &projection.elem);
539 let base = tr_base.to_const(span);
540 let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
541 let has_metadata = self.cx.type_has_metadata(projected_ty);
543 let (projected, llextra) = match projection.elem {
544 mir::ProjectionElem::Deref => {
545 let (base, extra) = if !has_metadata {
546 (base.llval, ptr::null_mut())
548 base.get_fat_ptr(self.cx)
550 if self.cx.statics.borrow().contains_key(&base) {
551 (Base::Static(base), extra)
552 } else if let ty::TyStr = projected_ty.sty {
553 (Base::Str(base), extra)
556 let v = self.cx.const_unsized.borrow().get(&v).map_or(v, |&v| v);
557 let mut val = unsafe { llvm::LLVMGetInitializer(v) };
559 span_bug!(span, "dereference of non-constant pointer `{:?}`",
562 let layout = self.cx.layout_of(projected_ty);
563 if let layout::Abi::Scalar(ref scalar) = layout.abi {
564 let i1_type = Type::i1(self.cx);
565 if scalar.is_bool() && val_ty(val) != i1_type {
567 val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
571 (Base::Value(val), extra)
574 mir::ProjectionElem::Field(ref field, _) => {
575 let llprojected = base.get_field(self.cx, field.index());
576 let llextra = if !has_metadata {
581 (Base::Value(llprojected), llextra)
583 mir::ProjectionElem::Index(index) => {
584 let index = &mir::Operand::Copy(mir::Place::Local(index));
585 let llindex = self.const_operand(index, span)?.llval;
587 let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) {
590 span_bug!(span, "index is not an integer-constant expression")
593 // Produce an undef instead of an LLVM assertion on OOB.
594 let len = common::const_to_uint(tr_base.len(self.cx));
595 let llelem = if iv < len as u128 {
596 const_get_elt(base.llval, iv as u64)
598 C_undef(self.cx.layout_of(projected_ty).llvm_type(self.cx))
601 (Base::Value(llelem), ptr::null_mut())
603 _ => span_bug!(span, "{:?} in constant", projection.elem)
615 fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
616 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
617 debug!("const_operand({:?} @ {:?})", operand, span);
618 let result = match *operand {
619 mir::Operand::Copy(ref place) |
620 mir::Operand::Move(ref place) => {
621 Ok(self.const_place(place, span)?.to_const(span))
624 mir::Operand::Constant(ref constant) => {
625 let ty = self.monomorphize(&constant.ty);
626 match constant.literal.clone() {
627 mir::Literal::Promoted { index } => {
628 let mir = &self.mir.promoted[index];
629 MirConstContext::new(self.cx, mir, self.substs, IndexVec::new()).trans()
631 mir::Literal::Value { value } => {
632 if let ConstVal::Unevaluated(def_id, substs) = value.val {
633 let substs = self.monomorphize(&substs);
634 MirConstContext::trans_def(self.cx, def_id, substs, IndexVec::new())
636 Ok(Const::from_constval(self.cx, &value.val, ty))
642 debug!("const_operand({:?} @ {:?}) = {:?}", operand, span,
643 result.as_ref().ok());
647 fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef])
650 let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
651 bug!("bad array type {:?}", array_ty)
653 let llunitty = self.cx.layout_of(elem_ty).llvm_type(self.cx);
654 // If the array contains enums, an LLVM array won't work.
655 let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
656 C_array(llunitty, fields)
658 C_struct(self.cx, fields, false)
660 Const::new(val, array_ty)
663 fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
664 dest_ty: Ty<'tcx>, span: Span)
665 -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
666 let tcx = self.cx.tcx;
667 debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span);
668 let val = match *rvalue {
669 mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
671 mir::Rvalue::Repeat(ref elem, count) => {
672 let elem = self.const_operand(elem, span)?;
673 let size = count.as_u64();
674 assert_eq!(size as usize as u64, size);
675 let fields = vec![elem.llval; size as usize];
676 self.const_array(dest_ty, &fields)
679 mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => {
680 // Make sure to evaluate all operands to
681 // report as many errors as we possibly can.
682 let mut fields = Vec::with_capacity(operands.len());
683 let mut failure = Ok(());
684 for operand in operands {
685 match self.const_operand(operand, span) {
686 Ok(val) => fields.push(val.llval),
687 Err(err) => if failure.is_ok() { failure = Err(err); }
692 self.const_array(dest_ty, &fields)
695 mir::Rvalue::Aggregate(ref kind, ref operands) => {
696 // Make sure to evaluate all operands to
697 // report as many errors as we possibly can.
698 let mut fields = Vec::with_capacity(operands.len());
699 let mut failure = Ok(());
700 for operand in operands {
701 match self.const_operand(operand, span) {
702 Ok(val) => fields.push(val),
703 Err(err) => if failure.is_ok() { failure = Err(err); }
708 trans_const_adt(self.cx, dest_ty, kind, &fields)
711 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
712 let operand = self.const_operand(source, span)?;
713 let cast_ty = self.monomorphize(&cast_ty);
715 let val = match *kind {
716 mir::CastKind::ReifyFnPointer => {
717 match operand.ty.sty {
718 ty::TyFnDef(def_id, substs) => {
719 if tcx.has_attr(def_id, "rustc_args_required_const") {
720 bug!("reifying a fn ptr that requires \
723 callee::resolve_and_get_fn(self.cx, def_id, substs)
726 span_bug!(span, "{} cannot be reified to a fn ptr",
731 mir::CastKind::ClosureFnPointer => {
732 match operand.ty.sty {
733 ty::TyClosure(def_id, substs) => {
734 // Get the def_id for FnOnce::call_once
735 let fn_once = tcx.lang_items().fn_once_trait().unwrap();
737 .global_tcx().associated_items(fn_once)
738 .find(|it| it.kind == ty::AssociatedKind::Method)
740 // Now create its substs [Closure, Tuple]
741 let input = substs.closure_sig(def_id, tcx).input(0);
742 let input = tcx.erase_late_bound_regions_and_normalize(&input);
743 let substs = tcx.mk_substs([operand.ty, input]
744 .iter().cloned().map(Kind::from));
745 callee::resolve_and_get_fn(self.cx, call_once, substs)
748 bug!("{} cannot be cast to a fn ptr", operand.ty)
752 mir::CastKind::UnsafeFnPointer => {
753 // this is a no-op at the LLVM level
756 mir::CastKind::Unsize => {
757 let pointee_ty = operand.ty.builtin_deref(true)
758 .expect("consts: unsizing got non-pointer type").ty;
759 let (base, old_info) = if !self.cx.type_is_sized(pointee_ty) {
760 // Normally, the source is a thin pointer and we are
761 // adding extra info to make a fat pointer. The exception
762 // is when we are upcasting an existing object fat pointer
763 // to use a different vtable. In that case, we want to
764 // load out the original data pointer so we can repackage
766 let (base, extra) = operand.get_fat_ptr(self.cx);
769 (operand.llval, None)
772 let unsized_ty = cast_ty.builtin_deref(true)
773 .expect("consts: unsizing got non-pointer target type").ty;
774 let ptr_ty = self.cx.layout_of(unsized_ty).llvm_type(self.cx).ptr_to();
775 let base = consts::ptrcast(base, ptr_ty);
776 let info = base::unsized_info(self.cx, pointee_ty,
777 unsized_ty, old_info);
779 if old_info.is_none() {
780 let prev_const = self.cx.const_unsized.borrow_mut()
781 .insert(base, operand.llval);
782 assert!(prev_const.is_none() || prev_const == Some(operand.llval));
784 C_fat_ptr(self.cx, base, info)
786 mir::CastKind::Misc if self.cx.layout_of(operand.ty).is_llvm_immediate() => {
787 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
788 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
789 let cast_layout = self.cx.layout_of(cast_ty);
790 assert!(cast_layout.is_llvm_immediate());
791 let ll_t_out = cast_layout.immediate_llvm_type(self.cx);
792 let llval = operand.llval;
794 let mut signed = false;
795 let l = self.cx.layout_of(operand.ty);
796 if let layout::Abi::Scalar(ref scalar) = l.abi {
797 if let layout::Int(_, true) = scalar.value {
803 match (r_t_in, r_t_out) {
804 (CastTy::Int(_), CastTy::Int(_)) => {
805 let s = signed as llvm::Bool;
806 llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
808 (CastTy::Int(_), CastTy::Float) => {
809 cast_const_int_to_float(self.cx, llval, signed, ll_t_out)
811 (CastTy::Float, CastTy::Float) => {
812 llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
814 (CastTy::Float, CastTy::Int(IntTy::I)) => {
815 cast_const_float_to_int(self.cx, &operand,
816 true, ll_t_out, span)
818 (CastTy::Float, CastTy::Int(_)) => {
819 cast_const_float_to_int(self.cx, &operand,
820 false, ll_t_out, span)
822 (CastTy::Ptr(_), CastTy::Ptr(_)) |
823 (CastTy::FnPtr, CastTy::Ptr(_)) |
824 (CastTy::RPtr(_), CastTy::Ptr(_)) => {
825 consts::ptrcast(llval, ll_t_out)
827 (CastTy::Int(_), CastTy::Ptr(_)) => {
828 let s = signed as llvm::Bool;
829 let usize_llval = llvm::LLVMConstIntCast(llval,
830 self.cx.isize_ty.to_ref(), s);
831 llvm::LLVMConstIntToPtr(usize_llval, ll_t_out.to_ref())
833 (CastTy::Ptr(_), CastTy::Int(_)) |
834 (CastTy::FnPtr, CastTy::Int(_)) => {
835 llvm::LLVMConstPtrToInt(llval, ll_t_out.to_ref())
837 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
841 mir::CastKind::Misc => { // Casts from a fat-ptr.
842 let l = self.cx.layout_of(operand.ty);
843 let cast = self.cx.layout_of(cast_ty);
844 if l.is_llvm_scalar_pair() {
845 let (data_ptr, meta) = operand.get_fat_ptr(self.cx);
846 if cast.is_llvm_scalar_pair() {
847 let data_cast = consts::ptrcast(data_ptr,
848 cast.scalar_pair_element_llvm_type(self.cx, 0));
849 C_fat_ptr(self.cx, data_cast, meta)
850 } else { // cast to thin-ptr
851 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
852 // pointer-cast of that pointer to desired pointer type.
853 let llcast_ty = cast.immediate_llvm_type(self.cx);
854 consts::ptrcast(data_ptr, llcast_ty)
857 bug!("Unexpected non-fat-pointer operand")
861 Const::new(val, cast_ty)
864 mir::Rvalue::Ref(_, bk, ref place) => {
865 let tr_place = self.const_place(place, span)?;
867 let ty = tr_place.ty;
868 let ref_ty = tcx.mk_ref(tcx.types.re_erased,
869 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
871 let base = match tr_place.base {
872 Base::Value(llval) => {
873 // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
874 let align = if self.cx.type_is_sized(ty) {
877 self.cx.tcx.data_layout.pointer_align
879 if let mir::BorrowKind::Mut { .. } = bk {
880 consts::addr_of_mut(self.cx, llval, align, "ref_mut")
882 consts::addr_of(self.cx, llval, align, "ref")
886 Base::Static(llval) => llval
889 let ptr = if self.cx.type_is_sized(ty) {
892 C_fat_ptr(self.cx, base, tr_place.llextra)
894 Const::new(ptr, ref_ty)
897 mir::Rvalue::Len(ref place) => {
898 let tr_place = self.const_place(place, span)?;
899 Const::new(tr_place.len(self.cx), tcx.types.usize)
902 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
903 let lhs = self.const_operand(lhs, span)?;
904 let rhs = self.const_operand(rhs, span)?;
906 let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
907 let (lhs, rhs) = (lhs.llval, rhs.llval);
908 Const::new(const_scalar_binop(op, lhs, rhs, ty), binop_ty)
911 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
912 let lhs = self.const_operand(lhs, span)?;
913 let rhs = self.const_operand(rhs, span)?;
915 let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
916 let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
917 let (lhs, rhs) = (lhs.llval, rhs.llval);
918 assert!(!ty.is_fp());
920 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
921 Some((llval, of)) => {
922 trans_const_adt(self.cx, binop_ty, &mir::AggregateKind::Tuple, &[
923 Const::new(llval, val_ty),
924 Const::new(C_bool(self.cx, of), tcx.types.bool)
928 span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}",
929 rvalue, Value(lhs), Value(rhs));
934 mir::Rvalue::UnaryOp(op, ref operand) => {
935 let operand = self.const_operand(operand, span)?;
936 let lloperand = operand.llval;
937 let llval = match op {
940 llvm::LLVMConstNot(lloperand)
944 let is_float = operand.ty.is_fp();
947 llvm::LLVMConstFNeg(lloperand)
949 llvm::LLVMConstNeg(lloperand)
954 Const::new(llval, operand.ty)
957 mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
958 assert!(self.cx.type_is_sized(ty));
959 let llval = C_usize(self.cx, self.cx.size_of(ty).bytes());
960 Const::new(llval, tcx.types.usize)
963 _ => span_bug!(span, "{:?} in constant", rvalue)
966 debug!("const_rvalue({:?}: {:?} @ {:?}) = {:?}", rvalue, dest_ty, span, val);
973 fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
975 ty::TyInt(int_type) => const_to_opt_u128(value, true)
976 .and_then(|input| ConstInt::new_signed(input as i128, int_type,
977 tcx.sess.target.isize_ty)),
978 ty::TyUint(uint_type) => const_to_opt_u128(value, false)
979 .and_then(|input| ConstInt::new_unsigned(input, uint_type,
980 tcx.sess.target.usize_ty)),
986 pub fn const_scalar_binop(op: mir::BinOp,
989 input_ty: Ty) -> ValueRef {
990 assert!(!input_ty.is_simd());
991 let is_float = input_ty.is_fp();
992 let signed = input_ty.is_signed();
996 mir::BinOp::Add if is_float => llvm::LLVMConstFAdd(lhs, rhs),
997 mir::BinOp::Add => llvm::LLVMConstAdd(lhs, rhs),
999 mir::BinOp::Sub if is_float => llvm::LLVMConstFSub(lhs, rhs),
1000 mir::BinOp::Sub => llvm::LLVMConstSub(lhs, rhs),
1002 mir::BinOp::Mul if is_float => llvm::LLVMConstFMul(lhs, rhs),
1003 mir::BinOp::Mul => llvm::LLVMConstMul(lhs, rhs),
1005 mir::BinOp::Div if is_float => llvm::LLVMConstFDiv(lhs, rhs),
1006 mir::BinOp::Div if signed => llvm::LLVMConstSDiv(lhs, rhs),
1007 mir::BinOp::Div => llvm::LLVMConstUDiv(lhs, rhs),
1009 mir::BinOp::Rem if is_float => llvm::LLVMConstFRem(lhs, rhs),
1010 mir::BinOp::Rem if signed => llvm::LLVMConstSRem(lhs, rhs),
1011 mir::BinOp::Rem => llvm::LLVMConstURem(lhs, rhs),
1013 mir::BinOp::BitXor => llvm::LLVMConstXor(lhs, rhs),
1014 mir::BinOp::BitAnd => llvm::LLVMConstAnd(lhs, rhs),
1015 mir::BinOp::BitOr => llvm::LLVMConstOr(lhs, rhs),
1016 mir::BinOp::Shl => {
1017 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
1018 llvm::LLVMConstShl(lhs, rhs)
1020 mir::BinOp::Shr => {
1021 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
1022 if signed { llvm::LLVMConstAShr(lhs, rhs) }
1023 else { llvm::LLVMConstLShr(lhs, rhs) }
1025 mir::BinOp::Eq | mir::BinOp::Ne |
1026 mir::BinOp::Lt | mir::BinOp::Le |
1027 mir::BinOp::Gt | mir::BinOp::Ge => {
1029 let cmp = base::bin_op_to_fcmp_predicate(op.to_hir_binop());
1030 llvm::LLVMConstFCmp(cmp, lhs, rhs)
1032 let cmp = base::bin_op_to_icmp_predicate(op.to_hir_binop(),
1034 llvm::LLVMConstICmp(cmp, lhs, rhs)
1037 mir::BinOp::Offset => unreachable!("BinOp::Offset in const-eval!")
1042 pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1047 -> Option<(ValueRef, bool)> {
1048 if let (Some(lhs), Some(rhs)) = (to_const_int(lllhs, input_ty, tcx),
1049 to_const_int(llrhs, input_ty, tcx)) {
1050 let result = match op {
1051 mir::BinOp::Add => lhs + rhs,
1052 mir::BinOp::Sub => lhs - rhs,
1053 mir::BinOp::Mul => lhs * rhs,
1054 mir::BinOp::Shl => lhs << rhs,
1055 mir::BinOp::Shr => lhs >> rhs,
1057 bug!("Operator `{:?}` is not a checkable operator", op)
1061 let of = match result {
1063 Err(ConstMathErr::Overflow(_)) |
1064 Err(ConstMathErr::ShiftNegative) => true,
1066 bug!("Operator `{:?}` on `{:?}` and `{:?}` errored: {}",
1067 op, lhs, rhs, err.description());
1071 Some((const_scalar_binop(op, lllhs, llrhs, input_ty), of))
1077 unsafe fn cast_const_float_to_int(cx: &CodegenCx,
1081 span: Span) -> ValueRef {
1082 let llval = operand.llval;
1083 let float_bits = match operand.ty.sty {
1084 ty::TyFloat(fty) => fty.bit_width(),
1085 _ => bug!("cast_const_float_to_int: operand not a float"),
1087 // Note: this breaks if llval is a complex constant expression rather than a simple constant.
1088 // One way that might happen would be if addresses could be turned into integers in constant
1089 // expressions, but that doesn't appear to be possible?
1090 // In any case, an ICE is better than producing undef.
1091 let llval_bits = consts::bitcast(llval, Type::ix(cx, float_bits as u64));
1092 let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
1093 panic!("could not get bits of constant float {:?}",
1096 let int_width = int_ty.int_width() as usize;
1097 // Try to convert, but report an error for overflow and NaN. This matches HIR const eval.
1098 let cast_result = match float_bits {
1099 32 if signed => ieee::Single::from_bits(bits).to_i128(int_width).map(|v| v as u128),
1100 64 if signed => ieee::Double::from_bits(bits).to_i128(int_width).map(|v| v as u128),
1101 32 => ieee::Single::from_bits(bits).to_u128(int_width),
1102 64 => ieee::Double::from_bits(bits).to_u128(int_width),
1103 n => bug!("unsupported float width {}", n),
1105 if cast_result.status.contains(Status::INVALID_OP) {
1106 let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
1107 err.report(cx.tcx, span, "expression");
1109 C_uint_big(int_ty, cast_result.value)
// Constant-fold an integer -> float cast on an LLVM constant value.
//
// NOTE(review): this copy of the file has lines elided (the rest of the
// parameter list and parts of the if/else chain are missing); the comments
// below describe only the code that is visible here.
1112 unsafe fn cast_const_int_to_float(cx: &CodegenCx,
1115                        float_ty: Type) -> ValueRef {
1116     // Note: this breaks if llval is a complex constant expression rather than a simple constant.
1117     // One way that might happen would be if addresses could be turned into integers in constant
1118     // expressions, but that doesn't appear to be possible?
1119     // In any case, an ICE is better than producing undef.
1120     let value = const_to_opt_u128(llval, signed).unwrap_or_else(|| {
         // NOTE(review): "z128" in the message below looks like a typo for
         // "u128"; left untouched here because it is runtime panic output.
1121         panic!("could not get z128 value of constant integer {:?}",
         // Signed source values: let LLVM's signed int-to-fp constant fold the cast.
1125         llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
1126     } else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
1127         // We're casting to f32 and the value is > f32::MAX + 0.5 ULP -> round up to infinity.
1128         let infinity_bits = C_u32(cx, ieee::Single::INFINITY.to_bits() as u32);
1129         consts::bitcast(infinity_bits, float_ty)
         // Remaining unsigned cases: LLVM's unsigned int-to-fp conversion.
1131         llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
1135 impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     /// Translate a MIR constant operand into a `Const` value.
     ///
     /// Three cases are handled: promoted temporaries (translated from their
     /// own promoted MIR body), unevaluated constants (translated via
     /// `MirConstContext::trans_def`), and already-evaluated `ConstVal`s
     /// (built directly with `Const::from_constval`).
     ///
     /// NOTE(review): some lines of this method (match-arm/closure closers,
     /// the return expression) appear elided in this copy of the file.
1136     pub fn trans_constant(&mut self,
1137                           bx: &Builder<'a, 'tcx>,
1138                           constant: &mir::Constant<'tcx>)
1141         debug!("trans_constant({:?})", constant);
         // Substitute this function's type parameters into the constant's type.
1142         let ty = self.monomorphize(&constant.ty);
1143         let result = match constant.literal.clone() {
1144             mir::Literal::Promoted { index } => {
             // Promoted temporaries carry their own MIR body, stored in the
             // parent MIR's `promoted` table; translate it with our substs.
1145                 let mir = &self.mir.promoted[index];
1146                 MirConstContext::new(bx.cx, mir, self.param_substs, IndexVec::new()).trans()
1148             mir::Literal::Value { value } => {
1149                 if let ConstVal::Unevaluated(def_id, substs) = value.val {
                 // Not yet evaluated: monomorphize the substs and translate
                 // the constant's definition directly.
1150                     let substs = self.monomorphize(&substs);
1151                     MirConstContext::trans_def(bx.cx, def_id, substs, IndexVec::new())
1153                     Ok(Const::from_constval(bx.cx, &value.val, ty))
         // On failure, fall back to an undef of the right LLVM type — see the
         // comment below: after an error we need not produce working code.
1158         let result = result.unwrap_or_else(|_| {
1159             // We've errored, so we don't have to produce working code.
1160             let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
1161             Const::new(C_undef(llty), ty)
1164         debug!("trans_constant({:?}) = {:?}", constant, result);
/// Translate the initializer of a `static` item into an LLVM constant,
/// by evaluating the item's MIR.
///
/// `Substs::empty()` is used because statics take no type parameters; the
/// empty `IndexVec` presumably supplies no pre-filled values — TODO confirm
/// against `MirConstContext::trans_def`'s signature (elided from this view).
1170 pub fn trans_static_initializer<'a, 'tcx>(
1171     cx: &CodegenCx<'a, 'tcx>,
1173     -> Result<ValueRef, ConstEvalErr<'tcx>>
1175     MirConstContext::trans_def(cx, def_id, Substs::empty(), IndexVec::new())
1179 /// Construct a constant value, suitable for initializing a
1180 /// GlobalVariable, given a case and constant values for its fields.
1181 /// Note that this may have a different LLVM type (and different
1182 /// alignment!) from the representation's `type_of`, so it needs a
1183 /// pointer cast before use.
1185 /// The LLVM type system does not directly support unions, and only
1186 /// pointers can be bitcast, so a constant (and, by extension, the
1187 /// GlobalVariable initialized by it) will have a type that can vary
1188 /// depending on which case of an enum it is.
1190 /// To understand the alignment situation, consider `enum E { V64(u64),
1191 /// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
1192 /// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
1193 /// i32, i32}`, which is 4-byte aligned.
1195 /// Currently the returned value has the same size as the type, but
1196 /// this could be changed in the future to avoid allocating unnecessary
1197 /// space after values of shorter-than-maximum cases.
1198 fn trans_const_adt<'a, 'tcx>(
1199     cx: &CodegenCx<'a, 'tcx>,
1201     kind: &mir::AggregateKind,
1202     vals: &[Const<'tcx>]
     // NOTE(review): several lines of this function (match closers, some
     // arms) are elided in this copy of the file.
1204     let l = cx.layout_of(t);
     // Which variant is being built; non-ADT aggregates presumably use an
     // elided arm of this match — TODO confirm.
1205     let variant_index = match *kind {
1206         mir::AggregateKind::Adt(_, index, _, _) => index,
     // An uninhabited value can never exist at runtime, so any bit pattern
     // (undef) is a valid "constant" for it.
1210     if let layout::Abi::Uninhabited = l.abi {
1211         return Const::new(C_undef(l.llvm_type(cx)), t);
1215         layout::Variants::Single { index } => {
             // Univariant layout: the requested variant must be the only one.
1216             assert_eq!(variant_index, index);
1217             if let layout::FieldPlacement::Union(_) = l.fields {
                 // Unions are translated as a single active field plus
                 // trailing undef padding up to the union's full size.
1218                 assert_eq!(variant_index, 0);
1219                 assert_eq!(vals.len(), 1);
1220                 let (field_size, field_align) = cx.size_and_align_of(vals[0].ty);
1223                     padding(cx, l.size - field_size)
                 // Mark the struct packed if the field wants more alignment
                 // than the union layout provides.
1226                 let packed = l.align.abi() < field_align.abi();
1227                 Const::new(C_struct(cx, &contents, packed), t)
             // SIMD-style types: build an LLVM constant vector from the lanes.
1229             if let layout::Abi::Vector { .. } = l.abi {
1230                 if let layout::FieldPlacement::Array { .. } = l.fields {
1231                     return Const::new(C_vector(&vals.iter().map(|x| x.llval)
1232                                                .collect::<Vec<_>>()), t);
             // Ordinary struct-like case: no discriminant needed.
1235             build_const_struct(cx, l, vals, None)
1238         layout::Variants::Tagged { .. } => {
             // Tagged enums: compute the variant's discriminant value…
1239             let discr = match *kind {
1240                 mir::AggregateKind::Adt(adt_def, _, _, _) => {
1241                     adt_def.discriminant_for_variant(cx.tcx, variant_index)
1242                         .to_u128_unchecked() as u64
             // …and materialize it with the tag field's LLVM type (field 0).
1246             let discr_field = l.field(cx, 0);
1247             let discr = C_int(discr_field.llvm_type(cx), discr as i64);
             // If the whole enum is just the scalar tag, the tag is the value.
1248             if let layout::Abi::Scalar(_) = l.abi {
1249                 Const::new(discr, t)
             // Otherwise prepend the tag to the variant's fields.
1251                 let discr = Const::new(discr, discr_field.ty);
1252                 build_const_struct(cx, l.for_variant(cx, variant_index), vals, Some(discr))
1255         layout::Variants::NicheFilling {
             // The dataful variant is stored as-is, with no explicit tag.
1261             if variant_index == dataful_variant {
1262                 build_const_struct(cx, l.for_variant(cx, dataful_variant), vals, None)
             // Other variants are encoded purely as a value in the niche
             // field: (variant_index - niche_variants.start) + niche_start.
1264                 let niche = l.field(cx, 0);
1265                 let niche_llty = niche.llvm_type(cx);
1266                 let niche_value = ((variant_index - niche_variants.start) as u128)
1267                     .wrapping_add(niche_start);
1268                 // FIXME(eddyb) Check the actual primitive type here.
1269                 let niche_llval = if niche_value == 0 {
1270                     // HACK(eddyb) Using `C_null` as it works on all types.
1273                     C_uint_big(niche_llty, niche_value)
1275                 build_const_struct(cx, l, &[Const::new(niche_llval, niche.ty)], None)
1281 /// Building structs is a little complicated, because we might need to
1282 /// insert padding if a field's value is less aligned than its type.
1284 /// Continuing the example from `trans_const_adt`, a value of type `(u32,
1285 /// E)` should have the `E` at offset 8, but if that field's
1286 /// initializer is 4-byte aligned then simply translating the tuple as
1287 /// a two-element struct will locate it at offset 4, and accesses to it
1288 /// will read the wrong memory.
1289 fn build_const_struct<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
1290                                 layout: layout::TyLayout<'tcx>,
1291                                 vals: &[Const<'tcx>],
1292                                 discr: Option<Const<'tcx>>)
     // NOTE(review): some lines (match closers, a fallthrough arm or two)
     // are elided in this copy of the file.
     // One constant per field is required.
1294     assert_eq!(vals.len(), layout.fields.count());
     // Fast paths for layouts whose ABI is scalar / scalar-pair / vector
     // (only when there is no discriminant to prepend): strip ZST fields
     // and return the non-ZST field(s) directly, in offset order.
1297         layout::Abi::Scalar(_) |
1298         layout::Abi::ScalarPair(..) |
1299         layout::Abi::Vector { .. } if discr.is_none() => {
1300             let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
1301                 (f, layout.fields.offset(i))
1302             }).filter(|&(f, _)| !cx.layout_of(f.ty).is_zst());
1303             match (non_zst_fields.next(), non_zst_fields.next()) {
                 // Single non-ZST field at offset 0: it *is* the value.
1304                 (Some((x, offset)), None) if offset.bytes() == 0 => {
1305                     return Const::new(x.llval, layout.ty);
                 // Two fields, already in offset order.
1307                 (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
1308                     return Const::new(C_struct(cx, &[a.llval, b.llval], false), layout.ty);
                 // Two fields, reversed relative to their offsets: swap them.
1310                 (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
1311                     return Const::new(C_struct(cx, &[b.llval, a.llval], false), layout.ty);
     // General case: emit fields in increasing-offset order, with explicit
     // undef padding between them, tracking the running offset.
1319     // offset of current value
1320     let mut packed = false;
1321     let mut offset = Size::from_bytes(0);
1322     let mut cfields = Vec::new();
     // Worst case: one padding entry per field, plus discriminant and tail pad.
1323     cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2);
     // The discriminant, when present, goes first.
1325     if let Some(discr) = discr {
1326         let (field_size, field_align) = cx.size_and_align_of(discr.ty);
         // A field more aligned than the layout forces a packed LLVM struct.
1327         packed |= layout.align.abi() < field_align.abi();
1328         cfields.push(discr.llval);
1329         offset = field_size;
     // Pair each value with its target offset, ordered by offset.
1332     let parts = layout.fields.index_by_increasing_offset().map(|i| {
1333         (vals[i], layout.fields.offset(i))
1335     for (val, target_offset) in parts {
1336         let (field_size, field_align) = cx.size_and_align_of(val.ty);
1337         packed |= layout.align.abi() < field_align.abi();
         // Undef padding up to the field's layout-mandated offset.
1338         cfields.push(padding(cx, target_offset - offset));
1339         cfields.push(val.llval);
1340         offset = target_offset + field_size;
1343     // Pad to the size of the whole type, not e.g. the variant.
1344     cfields.push(padding(cx, cx.size_of(layout.ty) - offset));
1346     Const::new(C_struct(cx, &cfields, packed), layout.ty)
/// An undef `[size x i8]` constant, used as inter-field and trailing
/// padding when building constant structs.
1349 fn padding(cx: &CodegenCx, size: Size) -> ValueRef {
1350     C_undef(Type::array(&Type::i8(cx), size.bytes()))