1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use llvm::{self, ValueRef};
12 use rustc::middle::const_val::ConstVal;
13 use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err};
14 use rustc_const_math::ConstInt::*;
15 use rustc_const_math::ConstFloat::*;
16 use rustc_const_math::{ConstInt, ConstMathErr};
17 use rustc::hir::def_id::DefId;
18 use rustc::infer::TransNormalize;
20 use rustc::mir::tcx::LvalueTy;
21 use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable};
22 use rustc::ty::cast::{CastTy, IntTy};
23 use rustc::ty::subst::Substs;
24 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
25 use {abi, adt, base, Disr, machine};
28 use common::{self, CrateContext, const_get_elt, val_ty};
29 use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral};
30 use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef};
31 use common::const_to_opt_u128;
33 use monomorphize::{self, Instance};
43 use super::operand::{OperandRef, OperandValue};
44 use super::MirContext;
46 /// A sized constant rvalue.
47 /// The LLVM type might not be the same for a single Rust type,
48 /// e.g. each enum variant would have its own LLVM struct type.
49 #[derive(Copy, Clone)]
50 pub struct Const<'tcx> {
// NOTE(review): field declarations are elided in this chunk; usage elsewhere
// in the file shows at least `llval: ValueRef` and `ty: Ty<'tcx>`.
55 impl<'tcx> Const<'tcx> {
// Construct a Const from an already-built LLVM constant and its Rust type.
56 pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> {
63 /// Translate ConstVal into a LLVM constant value.
// Both f32 and f64 literals go through C_floating_f64; `llty` carries the
// actual float width. Signed integer variants pass `true` as the
// sign-extension flag to C_integral, unsigned variants pass `false`.
64 pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>,
68 let llty = type_of::type_of(ccx, ty);
70 ConstVal::Float(F32(v)) => C_floating_f64(v as f64, llty),
71 ConstVal::Float(F64(v)) => C_floating_f64(v, llty),
72 ConstVal::Float(FInfer {..}) => bug!("MIR must not use `{:?}`", cv),
73 ConstVal::Bool(v) => C_bool(ccx, v),
74 ConstVal::Integral(I8(v)) => C_integral(Type::i8(ccx), v as u64, true),
75 ConstVal::Integral(I16(v)) => C_integral(Type::i16(ccx), v as u64, true),
76 ConstVal::Integral(I32(v)) => C_integral(Type::i32(ccx), v as u64, true),
77 ConstVal::Integral(I64(v)) => C_integral(Type::i64(ccx), v as u64, true),
// 128-bit values do not fit the u64-based C_integral path; they use
// C_big_integral instead.
78 ConstVal::Integral(I128(v)) => C_big_integral(Type::i128(ccx), v as u128),
// isize/usize are resolved to the target's pointer-sized integer width.
79 ConstVal::Integral(Isize(v)) => {
80 let i = v.as_i64(ccx.tcx().sess.target.int_type);
81 C_integral(Type::int(ccx), i as u64, true)
83 ConstVal::Integral(U8(v)) => C_integral(Type::i8(ccx), v as u64, false),
84 ConstVal::Integral(U16(v)) => C_integral(Type::i16(ccx), v as u64, false),
85 ConstVal::Integral(U32(v)) => C_integral(Type::i32(ccx), v as u64, false),
86 ConstVal::Integral(U64(v)) => C_integral(Type::i64(ccx), v, false),
87 ConstVal::Integral(U128(v)) => C_big_integral(Type::i128(ccx), v),
88 ConstVal::Integral(Usize(v)) => {
89 let u = v.as_u64(ccx.tcx().sess.target.uint_type);
90 C_integral(Type::int(ccx), u, false)
// Inference placeholders and HIR-local-referencing values must have been
// resolved before MIR trans; hitting them here is a compiler bug.
92 ConstVal::Integral(Infer(_)) |
93 ConstVal::Integral(InferSigned(_)) => bug!("MIR must not use `{:?}`", cv),
94 ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
95 ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"),
96 ConstVal::Struct(_) | ConstVal::Tuple(_) |
97 ConstVal::Array(..) | ConstVal::Repeat(..) |
98 ConstVal::Function(_) => {
99 bug!("MIR must not use `{:?}` (which refers to a local ID)", cv)
101 ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false),
104 assert!(!ty.has_erasable_regions());
// Split a two-element LLVM constant aggregate into its components.
109 fn get_pair(&self) -> (ValueRef, ValueRef) {
110 (const_get_elt(self.llval, &[0]),
111 const_get_elt(self.llval, &[1]))
// Like get_pair, but asserts the fat-pointer field layout (data at index
// FAT_PTR_ADDR, extra/metadata at FAT_PTR_EXTRA) before extracting.
114 fn get_fat_ptr(&self) -> (ValueRef, ValueRef) {
115 assert_eq!(abi::FAT_PTR_ADDR, 0);
116 assert_eq!(abi::FAT_PTR_EXTRA, 1);
// View this constant as an lvalue with no extra (sized) data.
120 fn as_lvalue(&self) -> ConstLvalue<'tcx> {
122 base: Base::Value(self.llval),
123 llextra: ptr::null_mut(),
// Convert to an operand for MIR codegen: an immediate pair, a plain
// immediate, or (fallback) a by-ref pointer to a private global.
128 pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
129 let llty = type_of::immediate_type_of(ccx, self.ty);
130 let llvalty = val_ty(self.llval);
132 let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
133 let (a, b) = self.get_pair();
134 OperandValue::Pair(a, b)
135 } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) {
136 // If the types match, we can use the value directly.
137 OperandValue::Immediate(self.llval)
139 // Otherwise, or if the value is not immediate, we create
140 // a constant LLVM global and cast its address if necessary.
141 let align = type_of::align_of(ccx, self.ty);
142 let ptr = consts::addr_of(ccx, self.llval, align, "const");
143 OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()))
// Debug-format as `Const(<llvm value>: <rust type>)`.
153 impl<'tcx> fmt::Debug for Const<'tcx> {
154 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
155 write!(f, "Const({:?}: {:?})", Value(self.llval), self.ty)
// NOTE(review): the `enum Base` declaration line itself is elided in this
// chunk; these are its derive and per-variant doc comments (Value/Str/Static,
// judging from the match arms in ConstLvalue::to_const below).
159 #[derive(Copy, Clone)]
161 /// A constant value without a unique address.
164 /// String literal base pointer (cast from array).
167 /// The address of a static.
171 /// An lvalue as seen from a constant.
172 #[derive(Copy, Clone)]
173 struct ConstLvalue<'tcx> {
179 impl<'tcx> ConstLvalue<'tcx> {
// Read the value out of this lvalue. Only Base::Value can actually be
// loaded in a constant context; str and static bases are compiler bugs here.
180 fn to_const(&self, span: Span) -> Const<'tcx> {
182 Base::Value(val) => Const::new(val, self.ty),
184 span_bug!(span, "loading from `str` ({:?}) in constant",
187 Base::Static(val) => {
188 span_bug!(span, "loading from `static` ({:?}) in constant",
// Length of the underlying array/slice/str as an LLVM usize constant.
// Fixed-size arrays take it from the type; unsized types read it from
// the fat-pointer extra (`llextra`), which must be present.
194 pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
196 ty::TyArray(_, n) => C_uint(ccx, n),
197 ty::TySlice(_) | ty::TyStr => {
198 assert!(self.llextra != ptr::null_mut());
201 _ => bug!("unexpected type `{}` in ConstLvalue::len", self.ty)
206 /// Machinery for translating a constant's MIR to LLVM values.
207 /// FIXME(eddyb) use miri and lower its allocations to LLVM.
208 struct MirConstContext<'a, 'tcx: 'a> {
209 ccx: &'a CrateContext<'a, 'tcx>,
// The MIR body being evaluated (a constant, const fn, or promoted).
210 mir: &'a mir::Mir<'tcx>,
212 /// Type parameters for const fn and associated constants.
213 substs: &'tcx Substs<'tcx>,
215 /// Values of locals in a constant or const fn.
// `None` until the local is assigned during evaluation.
216 locals: IndexVec<mir::Local, Option<Const<'tcx>>>
220 impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
// Build a fresh context with all locals unset, then seed the argument
// locals from `args`.
221 fn new(ccx: &'a CrateContext<'a, 'tcx>,
222 mir: &'a mir::Mir<'tcx>,
223 substs: &'tcx Substs<'tcx>,
224 args: IndexVec<mir::Local, Const<'tcx>>)
225 -> MirConstContext<'a, 'tcx> {
226 let mut context = MirConstContext {
230 locals: (0..mir.local_decls.len()).map(|_| None).collect(),
232 for (i, arg) in args.into_iter().enumerate() {
233 // Locals after local 0 are the function arguments
234 let index = mir::Local::new(i + 1);
235 context.locals[index] = Some(arg);
// Resolve `instance` to its const definition, fetch its MIR, and evaluate
// it with the given arguments.
240 fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
241 instance: Instance<'tcx>,
242 args: IndexVec<mir::Local, Const<'tcx>>)
243 -> Result<Const<'tcx>, ConstEvalErr> {
244 let instance = instance.resolve_const(ccx.shared());
245 let mir = ccx.tcx().item_mir(instance.def);
246 MirConstContext::new(ccx, &mir, instance.substs, args).trans()
// Substitute this context's type parameters into `value` and normalize it.
249 fn monomorphize<T>(&self, value: &T) -> T
250 where T: TransNormalize<'tcx>
252 monomorphize::apply_param_substs(self.ccx.shared(),
// Interpret the MIR body basic-block by basic-block, starting at
// START_BLOCK, until a Return terminator yields the value of the
// RETURN_POINTER local. The first error encountered is remembered in
// `failure` while evaluation continues, so multiple diagnostics can be
// reported from one constant.
257 fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr> {
258 let tcx = self.ccx.tcx();
259 let mut bb = mir::START_BLOCK;
261 // Make sure to evaluate all statements to
262 // report as many errors as we possibly can.
263 let mut failure = Ok(());
266 let data = &self.mir[bb];
267 for statement in &data.statements {
268 let span = statement.source_info.span;
269 match statement.kind {
270 mir::StatementKind::Assign(ref dest, ref rvalue) => {
271 let ty = dest.ty(self.mir, tcx);
272 let ty = self.monomorphize(&ty).to_ty(tcx);
273 match self.const_rvalue(rvalue, ty, span) {
274 Ok(value) => self.store(dest, value, span),
// Keep only the first error; later ones are still evaluated above.
275 Err(err) => if failure.is_ok() { failure = Err(err); }
278 mir::StatementKind::StorageLive(_) |
279 mir::StatementKind::StorageDead(_) |
280 mir::StatementKind::Nop => {}
281 mir::StatementKind::SetDiscriminant{ .. } => {
282 span_bug!(span, "SetDiscriminant should not appear in constants?");
287 let terminator = data.terminator();
288 let span = terminator.source_info.span;
289 bb = match terminator.kind {
290 mir::TerminatorKind::Drop { target, .. } | // No dropping.
291 mir::TerminatorKind::Goto { target } => target,
292 mir::TerminatorKind::Return => {
294 return Ok(self.locals[mir::RETURN_POINTER].unwrap_or_else(|| {
295 span_bug!(span, "no returned value in constant");
// Assertions are checked at compile time: a failed bounds check or
// overflow becomes a reported const-eval error.
299 mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => {
300 let cond = self.const_operand(cond, span)?;
301 let cond_bool = common::const_to_uint(cond.llval) != 0;
302 if cond_bool != expected {
303 let err = match *msg {
304 mir::AssertMessage::BoundsCheck { ref len, ref index } => {
305 let len = self.const_operand(len, span)?;
306 let index = self.const_operand(index, span)?;
307 ErrKind::IndexOutOfBounds {
308 len: common::const_to_uint(len.llval),
309 index: common::const_to_uint(index.llval)
312 mir::AssertMessage::Math(ref err) => {
313 ErrKind::Math(err.clone())
317 let err = ConstEvalErr{ span: span, kind: err };
318 report_const_eval_err(tcx, &err, span, "expression").emit();
// Calls are evaluated recursively via trans_def; only TyFnDef callees
// are legal inside a constant.
324 mir::TerminatorKind::Call { ref func, ref args, ref destination, .. } => {
325 let fn_ty = func.ty(self.mir, tcx);
326 let fn_ty = self.monomorphize(&fn_ty);
327 let instance = match fn_ty.sty {
328 ty::TyFnDef(def_id, substs, _) => {
329 Instance::new(def_id, substs)
331 _ => span_bug!(span, "calling {:?} (of type {}) in constant",
335 let mut const_args = IndexVec::with_capacity(args.len());
337 match self.const_operand(arg, span) {
338 Ok(arg) => { const_args.push(arg); },
339 Err(err) => if failure.is_ok() { failure = Err(err); }
342 if let Some((ref dest, target)) = *destination {
343 match MirConstContext::trans_def(self.ccx, instance, const_args) {
344 Ok(value) => self.store(dest, value, span),
345 Err(err) => if failure.is_ok() { failure = Err(err); }
349 span_bug!(span, "diverging {:?} in constant", terminator.kind);
352 _ => span_bug!(span, "{:?} in constant", terminator.kind)
// Record an evaluated value into a local; constants may only ever assign
// to locals, so any other destination is a compiler bug.
357 fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) {
358 if let mir::Lvalue::Local(index) = *dest {
359 self.locals[index] = Some(value);
361 span_bug!(span, "assignment to {:?} in constant", dest);
// Evaluate an lvalue to a ConstLvalue. Locals must already be initialized;
// statics become Base::Static; projections (deref, field, index) are
// evaluated recursively on their base.
365 fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
366 -> Result<ConstLvalue<'tcx>, ConstEvalErr> {
367 let tcx = self.ccx.tcx();
369 if let mir::Lvalue::Local(index) = *lvalue {
370 return Ok(self.locals[index].unwrap_or_else(|| {
371 span_bug!(span, "{:?} not initialized", lvalue)
375 let lvalue = match *lvalue {
376 mir::Lvalue::Local(_) => bug!(), // handled above
377 mir::Lvalue::Static(def_id) => {
379 base: Base::Static(consts::get_static(self.ccx, def_id)),
380 llextra: ptr::null_mut(),
381 ty: lvalue.ty(self.mir, tcx).to_ty(tcx)
384 mir::Lvalue::Projection(ref projection) => {
385 let tr_base = self.const_lvalue(&projection.base, span)?;
386 let projected_ty = LvalueTy::Ty { ty: tr_base.ty }
387 .projection_ty(tcx, &projection.elem);
388 let base = tr_base.to_const(span);
389 let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
390 let is_sized = self.ccx.shared().type_is_sized(projected_ty);
392 let (projected, llextra) = match projection.elem {
393 mir::ProjectionElem::Deref => {
394 let (base, extra) = if is_sized {
395 (base.llval, ptr::null_mut())
// Dereferencing classifies the pointee: a known static stays
// Base::Static, a str stays Base::Str, and anything else must be a
// global whose initializer we can read.
399 if self.ccx.statics().borrow().contains_key(&base) {
400 (Base::Static(base), extra)
401 } else if let ty::TyStr = projected_ty.sty {
402 (Base::Str(base), extra)
// The const_unsized map tracks the original sized global behind an
// unsized cast; prefer it when present.
405 let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
406 let mut val = unsafe { llvm::LLVMGetInitializer(v) };
408 span_bug!(span, "dereference of non-constant pointer `{:?}`",
// bool is stored in memory as i8 but used as i1 immediate, so
// truncate the loaded initializer.
411 if projected_ty.is_bool() {
413 val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref());
416 (Base::Value(val), extra)
419 mir::ProjectionElem::Field(ref field, _) => {
420 let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval,
421 Disr(0), field.index());
422 let llextra = if is_sized {
427 (Base::Value(llprojected), llextra)
429 mir::ProjectionElem::Index(ref index) => {
430 let llindex = self.const_operand(index, span)?.llval;
432 let iv = if let Some(iv) = common::const_to_opt_u128(llindex, false) {
435 span_bug!(span, "index is not an integer-constant expression")
438 // Produce an undef instead of a LLVM assertion on OOB.
439 let len = common::const_to_uint(tr_base.len(self.ccx));
440 let llelem = if iv < len as u128 {
441 const_get_elt(base.llval, &[iv as u32])
443 C_undef(type_of::type_of(self.ccx, projected_ty))
446 (Base::Value(llelem), ptr::null_mut())
448 _ => span_bug!(span, "{:?} in constant", projection.elem)
// Evaluate a MIR operand to a Const: either read an lvalue, or translate a
// literal (item reference, promoted MIR, or plain value).
460 fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
461 -> Result<Const<'tcx>, ConstEvalErr> {
462 debug!("const_operand({:?} @ {:?})", operand, span);
463 let result = match *operand {
464 mir::Operand::Consume(ref lvalue) => {
465 Ok(self.const_lvalue(lvalue, span)?.to_const(span))
468 mir::Operand::Constant(ref constant) => {
469 let ty = self.monomorphize(&constant.ty);
470 match constant.literal.clone() {
471 mir::Literal::Item { def_id, substs } => {
472 // Shortcut for zero-sized types, including function item
473 // types, which would not work with MirConstContext.
474 if common::type_is_zero_size(self.ccx, ty) {
475 let llty = type_of::type_of(self.ccx, ty);
476 return Ok(Const::new(C_null(llty), ty));
479 let substs = self.monomorphize(&substs);
480 let instance = Instance::new(def_id, substs);
481 MirConstContext::trans_def(self.ccx, instance, IndexVec::new())
// Promoted rvalues are nested MIR bodies evaluated with the same substs.
483 mir::Literal::Promoted { index } => {
484 let mir = &self.mir.promoted[index];
485 MirConstContext::new(self.ccx, mir, self.substs, IndexVec::new()).trans()
487 mir::Literal::Value { value } => {
488 Ok(Const::from_constval(self.ccx, value, ty))
493 debug!("const_operand({:?} @ {:?}) = {:?}", operand, span,
494 result.as_ref().ok());
// Build an LLVM constant for an array value. Falls back to an anonymous
// struct when element constants have differing LLVM types (e.g. enums
// translated per-variant), since an LLVM array requires uniform elements.
498 fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef])
501 let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
502 bug!("bad array type {:?}", array_ty)
504 let llunitty = type_of::type_of(self.ccx, elem_ty);
505 // If the array contains enums, an LLVM array won't work.
506 let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
507 C_array(llunitty, fields)
509 C_struct(self.ccx, fields, false)
511 Const::new(val, array_ty)
// Evaluate a single MIR rvalue to a Const of type `dest_ty`. Handles Use,
// Repeat, Aggregate, all constant-legal Casts, Ref, Len, (Checked)BinaryOp
// and UnaryOp; anything else in a constant is a compiler bug.
514 fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
515 dest_ty: Ty<'tcx>, span: Span)
516 -> Result<Const<'tcx>, ConstEvalErr> {
517 let tcx = self.ccx.tcx();
518 debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span);
519 let val = match *rvalue {
520 mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
// `[elem; count]`: the element is evaluated once and duplicated.
522 mir::Rvalue::Repeat(ref elem, ref count) => {
523 let elem = self.const_operand(elem, span)?;
524 let size = count.value.as_u64(tcx.sess.target.uint_type);
525 let fields = vec![elem.llval; size as usize];
526 self.const_array(dest_ty, &fields)
529 mir::Rvalue::Aggregate(ref kind, ref operands) => {
530 // Make sure to evaluate all operands to
531 // report as many errors as we possibly can.
532 let mut fields = Vec::with_capacity(operands.len());
533 let mut failure = Ok(());
534 for operand in operands {
535 match self.const_operand(operand, span) {
536 Ok(val) => fields.push(val.llval),
537 Err(err) => if failure.is_ok() { failure = Err(err); }
543 mir::AggregateKind::Array => {
544 self.const_array(dest_ty, &fields)
546 mir::AggregateKind::Adt(..) |
547 mir::AggregateKind::Closure(..) |
548 mir::AggregateKind::Tuple => {
// Layout-aware construction (discriminants, padding) lives in trans_const.
549 Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty)
554 mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
555 let operand = self.const_operand(source, span)?;
556 let cast_ty = self.monomorphize(&cast_ty);
558 let val = match *kind {
// fn item -> fn pointer: materialize the function's address.
559 mir::CastKind::ReifyFnPointer => {
560 match operand.ty.sty {
561 ty::TyFnDef(def_id, substs, _) => {
562 Callee::def(self.ccx, def_id, substs)
566 span_bug!(span, "{} cannot be reified to a fn ptr",
571 mir::CastKind::UnsafeFnPointer => {
572 // this is a no-op at the LLVM level
575 mir::CastKind::Unsize => {
576 // unsize targets other than to a fat pointer currently
577 // can't be in constants.
578 assert!(common::type_is_fat_ptr(self.ccx, cast_ty));
580 let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
581 .expect("consts: unsizing got non-pointer type").ty;
582 let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
583 // Normally, the source is a thin pointer and we are
584 // adding extra info to make a fat pointer. The exception
585 // is when we are upcasting an existing object fat pointer
586 // to use a different vtable. In that case, we want to
587 // load out the original data pointer so we can repackage
589 let (base, extra) = operand.get_fat_ptr();
592 (operand.llval, None)
595 let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference)
596 .expect("consts: unsizing got non-pointer target type").ty;
597 let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to();
598 let base = consts::ptrcast(base, ptr_ty);
599 let info = base::unsized_info(self.ccx, pointee_ty,
600 unsized_ty, old_info);
// Remember the sized global behind this fat pointer so a later Deref
// (in const_lvalue) can recover its initializer.
602 if old_info.is_none() {
603 let prev_const = self.ccx.const_unsized().borrow_mut()
604 .insert(base, operand.llval);
605 assert!(prev_const.is_none() || prev_const == Some(operand.llval));
607 assert_eq!(abi::FAT_PTR_ADDR, 0);
608 assert_eq!(abi::FAT_PTR_EXTRA, 1);
609 C_struct(self.ccx, &[base, info], false)
// Scalar casts between immediates, dispatched on (input, output) CastTy.
611 mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
612 debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
613 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
614 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
615 let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty);
616 let llval = operand.llval;
// C-like enums use the signedness of their discriminant layout.
617 let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
618 let l = self.ccx.layout_of(operand.ty);
619 adt::is_discr_signed(&l)
621 operand.ty.is_signed()
625 match (r_t_in, r_t_out) {
626 (CastTy::Int(_), CastTy::Int(_)) => {
627 let s = signed as llvm::Bool;
628 llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
630 (CastTy::Int(_), CastTy::Float) => {
632 llvm::LLVMConstSIToFP(llval, ll_t_out.to_ref())
634 llvm::LLVMConstUIToFP(llval, ll_t_out.to_ref())
637 (CastTy::Float, CastTy::Float) => {
638 llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
640 (CastTy::Float, CastTy::Int(IntTy::I)) => {
641 llvm::LLVMConstFPToSI(llval, ll_t_out.to_ref())
643 (CastTy::Float, CastTy::Int(_)) => {
644 llvm::LLVMConstFPToUI(llval, ll_t_out.to_ref())
646 (CastTy::Ptr(_), CastTy::Ptr(_)) |
647 (CastTy::FnPtr, CastTy::Ptr(_)) |
648 (CastTy::RPtr(_), CastTy::Ptr(_)) => {
649 consts::ptrcast(llval, ll_t_out)
651 (CastTy::Int(_), CastTy::Ptr(_)) => {
652 llvm::LLVMConstIntToPtr(llval, ll_t_out.to_ref())
654 (CastTy::Ptr(_), CastTy::Int(_)) |
655 (CastTy::FnPtr, CastTy::Int(_)) => {
656 llvm::LLVMConstPtrToInt(llval, ll_t_out.to_ref())
658 _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
662 mir::CastKind::Misc => { // Casts from a fat-ptr.
663 let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty);
664 let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty);
665 if common::type_is_fat_ptr(self.ccx, operand.ty) {
666 let (data_ptr, meta_ptr) = operand.get_fat_ptr();
667 if common::type_is_fat_ptr(self.ccx, cast_ty) {
668 let ll_cft = ll_cast_ty.field_types();
669 let ll_fft = ll_from_ty.field_types();
670 let data_cast = consts::ptrcast(data_ptr, ll_cft[0]);
671 assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
672 C_struct(self.ccx, &[data_cast, meta_ptr], false)
673 } else { // cast to thin-ptr
674 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
675 // pointer-cast of that pointer to desired pointer type.
676 consts::ptrcast(data_ptr, ll_cast_ty)
679 bug!("Unexpected non-fat-pointer operand")
683 Const::new(val, cast_ty)
// `&lvalue`: give the value an address (a private global), or reuse the
// static's address; unsized referents produce a fat pointer.
686 mir::Rvalue::Ref(_, bk, ref lvalue) => {
687 let tr_lvalue = self.const_lvalue(lvalue, span)?;
689 let ty = tr_lvalue.ty;
690 let ref_ty = tcx.mk_ref(tcx.mk_region(ty::ReErased),
691 ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
693 let base = match tr_lvalue.base {
694 Base::Value(llval) => {
695 // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
696 let align = if self.ccx.shared().type_is_sized(ty) {
697 type_of::align_of(self.ccx, ty)
699 self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
701 if bk == mir::BorrowKind::Mut {
702 consts::addr_of_mut(self.ccx, llval, align, "ref_mut")
704 consts::addr_of(self.ccx, llval, align, "ref")
708 Base::Static(llval) => llval
711 let ptr = if self.ccx.shared().type_is_sized(ty) {
714 C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
716 Const::new(ptr, ref_ty)
719 mir::Rvalue::Len(ref lvalue) => {
720 let tr_lvalue = self.const_lvalue(lvalue, span)?;
721 Const::new(tr_lvalue.len(self.ccx), tcx.types.usize)
724 mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
725 let lhs = self.const_operand(lhs, span)?;
726 let rhs = self.const_operand(rhs, span)?;
728 let binop_ty = op.ty(tcx, lhs.ty, rhs.ty);
729 let (lhs, rhs) = (lhs.llval, rhs.llval);
730 Const::new(const_scalar_binop(op, lhs, rhs, ty), binop_ty)
// Checked ops yield a (value, overflowed) tuple; only integers allowed.
733 mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
734 let lhs = self.const_operand(lhs, span)?;
735 let rhs = self.const_operand(rhs, span)?;
737 let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
738 let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
739 let (lhs, rhs) = (lhs.llval, rhs.llval);
740 assert!(!ty.is_fp());
742 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
743 Some((llval, of)) => {
744 let llof = C_bool(self.ccx, of);
745 Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty)
748 span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}",
749 rvalue, Value(lhs), Value(rhs));
754 mir::Rvalue::UnaryOp(op, ref operand) => {
755 let operand = self.const_operand(operand, span)?;
756 let lloperand = operand.llval;
757 let llval = match op {
760 llvm::LLVMConstNot(lloperand)
// Negation picks the float or integer LLVM constant op by operand type.
764 let is_float = operand.ty.is_fp();
767 llvm::LLVMConstFNeg(lloperand)
769 llvm::LLVMConstNeg(lloperand)
774 Const::new(llval, operand.ty)
777 _ => span_bug!(span, "{:?} in constant", rvalue)
780 debug!("const_rvalue({:?}: {:?} @ {:?}) = {:?}", rvalue, dest_ty, span, val);
// Convert an LLVM integer constant back into a rustc ConstInt, using the
// Rust type to pick signedness and width; returns None for non-integer
// types or values that don't fit (per ConstInt::new_signed/new_unsigned).
787 fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
789 ty::TyInt(int_type) => const_to_opt_u128(value, true)
790 .and_then(|input| ConstInt::new_signed(input as i128, int_type,
791 tcx.sess.target.int_type)),
792 ty::TyUint(uint_type) => const_to_opt_u128(value, false)
793 .and_then(|input| ConstInt::new_unsigned(input, uint_type,
794 tcx.sess.target.uint_type)),
// Lower a MIR binary operator on two scalar LLVM constants to the matching
// LLVM constant expression, selecting the float/signed/unsigned flavor from
// `input_ty`. SIMD inputs are rejected up front.
800 pub fn const_scalar_binop(op: mir::BinOp,
803 input_ty: Ty) -> ValueRef {
804 assert!(!input_ty.is_simd());
805 let is_float = input_ty.is_fp();
806 let signed = input_ty.is_signed();
810 mir::BinOp::Add if is_float => llvm::LLVMConstFAdd(lhs, rhs),
811 mir::BinOp::Add => llvm::LLVMConstAdd(lhs, rhs),
813 mir::BinOp::Sub if is_float => llvm::LLVMConstFSub(lhs, rhs),
814 mir::BinOp::Sub => llvm::LLVMConstSub(lhs, rhs),
816 mir::BinOp::Mul if is_float => llvm::LLVMConstFMul(lhs, rhs),
817 mir::BinOp::Mul => llvm::LLVMConstMul(lhs, rhs),
819 mir::BinOp::Div if is_float => llvm::LLVMConstFDiv(lhs, rhs),
820 mir::BinOp::Div if signed => llvm::LLVMConstSDiv(lhs, rhs),
821 mir::BinOp::Div => llvm::LLVMConstUDiv(lhs, rhs),
823 mir::BinOp::Rem if is_float => llvm::LLVMConstFRem(lhs, rhs),
824 mir::BinOp::Rem if signed => llvm::LLVMConstSRem(lhs, rhs),
825 mir::BinOp::Rem => llvm::LLVMConstURem(lhs, rhs),
827 mir::BinOp::BitXor => llvm::LLVMConstXor(lhs, rhs),
828 mir::BinOp::BitAnd => llvm::LLVMConstAnd(lhs, rhs),
829 mir::BinOp::BitOr => llvm::LLVMConstOr(lhs, rhs),
// Shift amounts are cast to the LHS width before the LLVM shift op.
831 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
832 llvm::LLVMConstShl(lhs, rhs)
835 let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs);
836 if signed { llvm::LLVMConstAShr(lhs, rhs) }
837 else { llvm::LLVMConstLShr(lhs, rhs) }
839 mir::BinOp::Eq | mir::BinOp::Ne |
840 mir::BinOp::Lt | mir::BinOp::Le |
841 mir::BinOp::Gt | mir::BinOp::Ge => {
// Comparisons pick an fcmp or icmp predicate from the HIR operator.
843 let cmp = base::bin_op_to_fcmp_predicate(op.to_hir_binop());
844 llvm::LLVMConstFCmp(cmp, lhs, rhs)
846 let cmp = base::bin_op_to_icmp_predicate(op.to_hir_binop(),
848 llvm::LLVMConstICmp(cmp, lhs, rhs)
// Evaluate a checked integer operation on constants: returns the LLVM
// result plus an overflow flag, or None when either operand is not an
// integer convertible via to_const_int. The overflow flag is derived from
// rustc_const_math, not from LLVM.
855 pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
860 -> Option<(ValueRef, bool)> {
861 if let (Some(lhs), Some(rhs)) = (to_const_int(lllhs, input_ty, tcx),
862 to_const_int(llrhs, input_ty, tcx)) {
863 let result = match op {
864 mir::BinOp::Add => lhs + rhs,
865 mir::BinOp::Sub => lhs - rhs,
866 mir::BinOp::Mul => lhs * rhs,
867 mir::BinOp::Shl => lhs << rhs,
868 mir::BinOp::Shr => lhs >> rhs,
870 bug!("Operator `{:?}` is not a checkable operator", op)
// Overflow and negative-shift errors set the flag; any other math error
// here is a compiler bug.
874 let of = match result {
876 Err(ConstMathErr::Overflow(_)) |
877 Err(ConstMathErr::ShiftNegative) => true,
879 bug!("Operator `{:?}` on `{:?}` and `{:?}` errored: {}",
880 op, lhs, rhs, err.description());
884 Some((const_scalar_binop(op, lllhs, llrhs, input_ty), of))
890 impl<'a, 'tcx> MirContext<'a, 'tcx> {
// Translate a MIR constant during normal function codegen. Mirrors
// MirConstContext::const_operand's literal handling; on const-eval error
// it returns undef, since the error has already been reported and the
// surrounding code need not work.
891 pub fn trans_constant(&mut self,
892 bcx: &Builder<'a, 'tcx>,
893 constant: &mir::Constant<'tcx>)
896 debug!("trans_constant({:?})", constant);
897 let ty = self.monomorphize(&constant.ty);
898 let result = match constant.literal.clone() {
899 mir::Literal::Item { def_id, substs } => {
900 // Shortcut for zero-sized types, including function item
901 // types, which would not work with MirConstContext.
902 if common::type_is_zero_size(bcx.ccx, ty) {
903 let llty = type_of::type_of(bcx.ccx, ty);
904 return Const::new(C_null(llty), ty);
907 let substs = self.monomorphize(&substs);
908 let instance = Instance::new(def_id, substs);
909 MirConstContext::trans_def(bcx.ccx, instance, IndexVec::new())
911 mir::Literal::Promoted { index } => {
912 let mir = &self.mir.promoted[index];
913 MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans()
915 mir::Literal::Value { value } => {
916 Ok(Const::from_constval(bcx.ccx, value, ty))
920 let result = result.unwrap_or_else(|_| {
921 // We've errored, so we don't have to produce working code.
922 let llty = type_of::type_of(bcx.ccx, ty);
923 Const::new(C_undef(llty), ty)
926 debug!("trans_constant({:?}) = {:?}", constant, result);
// Evaluate a static item's initializer to an LLVM constant by running its
// MIR through MirConstContext with no arguments.
932 pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId)
933 -> Result<ValueRef, ConstEvalErr> {
934 let instance = Instance::mono(ccx.shared(), def_id);
935 MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval)
938 /// Construct a constant value, suitable for initializing a
939 /// GlobalVariable, given a case and constant values for its fields.
940 /// Note that this may have a different LLVM type (and different
941 /// alignment!) from the representation's `type_of`, so it needs a
942 /// pointer cast before use.
944 /// The LLVM type system does not directly support unions, and only
945 /// pointers can be bitcast, so a constant (and, by extension, the
946 /// GlobalVariable initialized by it) will have a type that can vary
947 /// depending on which case of an enum it is.
949 /// To understand the alignment situation, consider `enum E { V64(u64),
950 /// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
951 /// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
952 /// i32, i32}`, which is 4-byte aligned.
954 /// Currently the returned value has the same size as the type, but
955 /// this could be changed in the future to avoid allocating unnecessary
956 /// space after values of shorter-than-maximum cases.
957 fn trans_const<'a, 'tcx>(
958 ccx: &CrateContext<'a, 'tcx>,
960 kind: &mir::AggregateKind,
// Dispatch on the type's computed layout; each layout variant has its own
// construction strategy (discriminant encoding, padding, niche tricks).
963 let l = ccx.layout_of(t);
964 let dl = &ccx.tcx().data_layout;
965 let variant_index = match *kind {
966 mir::AggregateKind::Adt(_, index, _, _) => index,
// C-like enum: the value is just the discriminant integer, range-checked.
970 layout::CEnum { discr: d, min, max, .. } => {
971 let discr = match *kind {
972 mir::AggregateKind::Adt(adt_def, _, _, _) => {
973 Disr::from(adt_def.variants[variant_index].disr_val)
977 assert_eq!(vals.len(), 0);
978 adt::assert_discr_in_range(Disr(min), Disr(max), discr);
979 C_integral(Type::from_integer(ccx, d), discr.0, true)
// General enum: explicit leading discriminant, then the variant's fields,
// padded out to the full enum size so all variants have equal size.
981 layout::General { discr: d, ref variants, .. } => {
982 let variant = &variants[variant_index];
983 let lldiscr = C_integral(Type::from_integer(ccx, d), variant_index as u64, true);
984 let mut vals_with_discr = vec![lldiscr];
985 vals_with_discr.extend_from_slice(vals);
986 let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]);
987 let needed_padding = l.size(dl).bytes() - variant.stride().bytes();
988 if needed_padding > 0 {
989 contents.push(padding(ccx, needed_padding));
991 C_struct(ccx, &contents[..], false)
993 layout::UntaggedUnion { ref variants, .. } => {
994 assert_eq!(variant_index, 0);
995 let contents = build_const_union(ccx, variants, vals[0]);
996 C_struct(ccx, &contents, variants.packed)
998 layout::Univariant { ref variant, .. } => {
999 assert_eq!(variant_index, 0);
1000 let contents = build_const_struct(ccx, &variant, vals);
1001 C_struct(ccx, &contents[..], variant.packed)
1003 layout::Vector { .. } => {
// Nullable-pointer optimization: the non-null variant carries the value,
// the other variant is represented by a null of the non-null field's type.
1006 layout::RawNullablePointer { nndiscr, .. } => {
1007 let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0];
1008 if variant_index as u64 == nndiscr {
1009 assert_eq!(vals.len(), 1);
1012 C_null(type_of::sizing_type_of(ccx, nnty))
1015 layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
1016 if variant_index as u64 == nndiscr {
1017 C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
1019 let fields = adt::compute_fields(ccx, t, nndiscr as usize, false);
1020 let vals = fields.iter().map(|&ty| {
1021 // Always use null even if it's not the `discrfield`th
1022 // field; see #8506.
1023 C_null(type_of::sizing_type_of(ccx, ty))
1024 }).collect::<Vec<ValueRef>>();
1025 C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false)
// Fixed typo in the diagnostic: "repreented" -> "represented".
1028 _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l)
1032 /// Building structs is a little complicated, because we might need to
1033 /// insert padding if a field's value is less aligned than its type.
1035 /// Continuing the example from `trans_const`, a value of type `(u32,
1036 /// E)` should have the `E` at offset 8, but if that field's
1037 /// initializer is 4-byte aligned then simply translating the tuple as
1038 /// a two-element struct will locate it at offset 4, and accesses to it
1039 /// will read the wrong memory.
1040 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1041 st: &layout::Struct,
1044 assert_eq!(vals.len(), st.offsets.len());
1046 if vals.len() == 0 {
1050 // offset of current value
1052 let mut cfields = Vec::new();
// Worst case each field needs one padding entry before it, hence *2.
1053 cfields.reserve(st.offsets.len()*2);
// Walk fields in increasing memory-offset order, inserting explicit undef
// padding whenever a field's layout offset exceeds the bytes emitted so far.
1055 let parts = st.field_index_by_increasing_offset().map(|i| {
1056 (&vals[i], st.offsets[i].bytes())
1058 for (&val, target_offset) in parts {
1059 if offset < target_offset {
1060 cfields.push(padding(ccx, target_offset - offset));
1061 offset = target_offset;
1063 assert!(!is_undef(val));
1065 offset += machine::llsize_of_alloc(ccx, val_ty(val));
// Tail padding so the constant fills the struct's full stride.
1068 if offset < st.stride().bytes() {
1069 cfields.push(padding(ccx, st.stride().bytes() - offset));
// Build the field list for a union constant: the single active field's
// value, followed by undef padding up to the union's full stride.
1075 fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1077 field_val: ValueRef)
1079 let mut cfields = vec![field_val];
1081 let offset = machine::llsize_of_alloc(ccx, val_ty(field_val));
1082 let size = un.stride().bytes();
1084 cfields.push(padding(ccx, size - offset));
1090 fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
1091 C_undef(Type::array(&Type::i8(ccx), size))