1 use std::collections::{HashMap, HashSet};
4 use rustc::hir::def_id::DefId;
5 use rustc::hir::map::definitions::DefPathData;
6 use rustc::middle::const_val::ConstVal;
7 use rustc::middle::region::CodeExtent;
9 use rustc::traits::Reveal;
10 use rustc::ty::layout::{self, Layout, Size, Align};
11 use rustc::ty::subst::{Subst, Substs, Kind};
12 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder};
14 use rustc_data_structures::indexed_vec::Idx;
15 use syntax::codemap::{self, DUMMY_SP, Span};
16 use syntax::ast::{self, Mutability};
20 EvalError, EvalResult,
21 Global, GlobalId, Lvalue, LvalueExtra,
22 Memory, MemoryPointer, HasMemory,
25 PrimVal, PrimValKind, Value, Pointer,
/// The core interpreter state: machine-specific data, rustc's type context,
/// the virtual memory system, the cache of evaluated globals, and the
/// virtual call stack together with its resource limits.
30 pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
31 /// Stores data required by the `Machine`
32 pub machine_data: M::Data,
34 /// The results of the type checker, from rustc.
35 pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
37 /// The virtual memory system.
38 pub memory: Memory<'a, 'tcx, M>,
41 // FIXME(@RalfJung): validation branch
42 /// Lvalues that were suspended by the validation subsystem, and will be recovered later
43 pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
45 /// Precomputed statics, constants and promoteds.
46 pub globals: HashMap<GlobalId<'tcx>, Global<'tcx>>,
48 /// The virtual call stack.
49 pub(crate) stack: Vec<Frame<'tcx>>,
51 /// The maximum number of stack frames allowed
52 pub(crate) stack_limit: usize,
54 /// The maximum number of operations that may be executed.
55 /// This prevents infinite loops and huge computations from freezing up const eval.
56 /// Remove once halting problem is solved.
57 pub(crate) steps_remaining: u64,
/// A single entry of the virtual call stack (`EvalContext::stack`).
61 pub struct Frame<'tcx> {
62 ////////////////////////////////////////////////////////////////////////////////
63 // Function and callsite information
64 ////////////////////////////////////////////////////////////////////////////////
66 /// The MIR for the function called on this frame.
67 pub mir: &'tcx mir::Mir<'tcx>,
69 /// The def_id and substs of the current function
70 pub instance: ty::Instance<'tcx>,
72 /// The span of the call site.
73 pub span: codemap::Span,
75 ////////////////////////////////////////////////////////////////////////////////
76 // Return lvalue and locals
77 ////////////////////////////////////////////////////////////////////////////////
79 /// The block to return to when returning from the current stack frame
80 pub return_to_block: StackPopCleanup,
82 /// The location where the result of the current stack frame should be written to.
83 pub return_lvalue: Lvalue<'tcx>,
85 /// The list of locals for this stack frame, stored in order as
86 /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
87 /// `None` represents a local that is currently dead, while a live local
88 /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
90 /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
91 pub locals: Vec<Option<Value>>,
93 ////////////////////////////////////////////////////////////////////////////////
94 // Current position within the function
95 ////////////////////////////////////////////////////////////////////////////////
97 /// The block that is currently executed (or will be executed after the above call stacks
99 pub block: mir::BasicBlock,
101 /// The index of the currently evaluated statement.
/// What to do with the return frame's result when it is popped off the stack.
105 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
106 pub enum StackPopCleanup {
107 /// The stackframe existed to compute the initial value of a static/constant, make sure it
108 /// isn't modifiable afterwards in case of constants.
109 /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through
110 /// references or deallocated
111 MarkStatic(Mutability),
112 /// A regular stackframe added due to a function call will need to get forwarded to the next
114 Goto(mir::BasicBlock),
115 /// The main function and diverging functions have nowhere to return to
/// A lifetime bounded either by a region or by the end of a function.
/// NOTE(review): a frame-identifying field appears to be elided from this excerpt — confirm against the full file.
119 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
120 pub struct DynamicLifetime {
122 pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
/// Limits applied to one evaluation, consumed by `EvalContext::new`.
/// NOTE(review): a `step_limit` field is referenced by `Default` and by `new` but is not visible in this excerpt.
125 #[derive(Copy, Clone, Debug)]
126 pub struct ResourceLimits {
127 pub memory_size: u64,
129 pub stack_limit: usize,
132 impl Default for ResourceLimits {
133 fn default() -> Self {
135 memory_size: 100 * 1024 * 1024, // 100 MB
136 step_limit: 1_000_000,
/// A type paired with a flag recording whether it was reached through a
/// packed (misaligned) struct; see its use in `get_field_ty` / `field_path_offset_and_ty`.
142 #[derive(Copy, Clone, Debug)]
143 pub struct TyAndPacked<'tcx> {
148 impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
    // Constructor: builds a fresh interpreter from the type context, the
    // resource limits, and the machine hooks' data.
150 tcx: TyCtxt<'a, 'tcx, 'tcx>,
151 limits: ResourceLimits,
152 machine_data: M::Data,
153 memory_data: M::MemoryData,
        // Memory is sized from the limits; globals/suspended start empty.
158 memory: Memory::new(&tcx.data_layout, limits.memory_size, memory_data),
159 suspended: HashMap::new(),
160 globals: HashMap::new(),
162 stack_limit: limits.stack_limit,
163 steps_remaining: limits.step_limit,
    /// Allocates stack memory for a value of type `ty`, using the current frame's substs.
167 pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
168 let substs = self.substs();
169 self.alloc_ptr_with_substs(ty, substs)
    /// Allocates stack memory for a value of type `ty` monomorphized with `substs`.
    /// Panics (via `expect`) if `ty` is unsized — unsized values cannot be allocated here.
172 pub fn alloc_ptr_with_substs(
175 substs: &'tcx Substs<'tcx>
176 ) -> EvalResult<'tcx, MemoryPointer> {
177 let size = self.type_size_with_substs(ty, substs)?.expect("cannot alloc memory for unsized type");
178 let align = self.type_align_with_substs(ty, substs)?;
179 self.memory.allocate(size, align, MemoryKind::Stack)
    // Simple accessors (bodies partially elided in this excerpt).
182 pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
186 pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
190 pub fn stack(&self) -> &[Frame<'tcx>] {
    /// Index of the innermost frame; requires a non-empty stack.
195 pub fn cur_frame(&self) -> usize {
196 assert!(self.stack.len() > 0);
200 /// Returns true if the current frame or any parent frame is part of a ctfe.
202 /// Used to disable features in const eval, which do not have an RFC enabling
203 /// them or which can't be written in a way that they produce the same output
204 /// that evaluating the code at runtime would produce.
205 pub fn const_env(&self) -> bool {
    // Walk outward from the innermost frame looking for a static/const frame.
206 for frame in self.stack.iter().rev() {
207 if let StackPopCleanup::MarkStatic(_) = frame.return_to_block {
    /// Turns a Rust string into a fat-pointer `Value`: (pointer to cached bytes, length).
214 pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
215 let ptr = self.memory.allocate_cached(s.as_bytes())?;
216 Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::from_u128(s.len() as u128)))
    /// Converts a rustc `ConstVal` into the interpreter's `Value` representation.
    /// Aggregate constants (variants, structs, tuples, arrays, repeats) are not handled yet.
219 pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
220 use rustc::middle::const_val::ConstVal::*;
221 use rustc_const_math::ConstFloat;
223 let primval = match *const_val {
224 Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()),
226 Float(ConstFloat::F32(f)) => PrimVal::from_f32(f),
227 Float(ConstFloat::F64(f)) => PrimVal::from_f64(f),
229 Bool(b) => PrimVal::from_bool(b),
230 Char(c) => PrimVal::from_char(c),
    // Strings become fat pointers, so they short-circuit past the PrimVal path.
232 Str(ref s) => return self.str_to_value(s),
235 let ptr = self.memory.allocate_cached(bs)?;
239 Variant(_) => unimplemented!(),
240 Struct(_) => unimplemented!(),
241 Tuple(_) => unimplemented!(),
242 // function items are zero sized and thus have no readable value
243 Function(..) => PrimVal::Undef,
244 Array(_) => unimplemented!(),
245 Repeat(_, _) => unimplemented!(),
248 Ok(Value::ByVal(primval))
    /// Whether `ty` is `Sized`. Must only be called on fully-monomorphized types.
251 pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
252 // generics are weird, don't run this function on a generic
253 assert!(!ty.needs_subst());
254 ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
    /// Fetches the MIR for `instance`, reporting `NoMirFor` when an item's
    /// optimized MIR is unavailable (e.g. items from crates compiled without it).
257 pub fn load_mir(&self, instance: ty::InstanceDef<'tcx>) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
258 trace!("load mir {:?}", instance);
260 ty::InstanceDef::Item(def_id) => self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| EvalError::NoMirFor(self.tcx.item_path_str(def_id))),
261 _ => Ok(self.tcx.instance_mir(instance)),
    /// Erases regions, applies `substs`, and normalizes associated types,
    /// producing a concrete type the interpreter can lay out.
265 pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
266 // miri doesn't care about lifetimes, and will choke on some crazy ones
267 // let's simply get rid of them
268 let without_lifetimes = self.tcx.erase_regions(&ty);
269 let substituted = without_lifetimes.subst(self.tcx, substs);
270 self.tcx.normalize_associated_type(&substituted)
    /// Strips a `Binder` of its late-bound regions and erases all remaining regions.
273 pub fn erase_lifetimes<T>(&self, value: &Binder<T>) -> T
274 where T : TypeFoldable<'tcx>
276 let value = self.tcx.erase_late_bound_regions(value);
277 self.tcx.erase_regions(&value)
    /// Computes the runtime (size, align) in bytes of a possibly-unsized value of
    /// type `ty`; for unsized types the "extra" half of the fat pointer `value`
    /// supplies the dynamic part (slice length or vtable).
280 pub fn size_and_align_of_dst(
283 value: Value, // This has to be a fat ptr; we only care about the "extra" data in it.
284 ) -> EvalResult<'tcx, (u64, u64)> {
    // Statically sized types are answered directly from layout.
285 if let Some(size) = self.type_size(ty)? {
286 Ok((size as u64, self.type_align(ty)? as u64))
289 ty::TyAdt(..) | ty::TyTuple(..) => {
290 // First get the size of all statically known fields.
291 // Don't use type_of::sizing_type_of because that expects t to be sized,
292 // and it also rounds up to alignment, which we want to avoid,
293 // as the unsized field's alignment could be smaller.
294 assert!(!ty.is_simd());
295 let layout = self.type_layout(ty)?;
296 debug!("DST {} layout: {:?}", ty, layout);
298 let (sized_size, sized_align) = match *layout {
299 ty::layout::Layout::Univariant { ref variant, .. } => {
300 (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align)
303 bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}",
307 debug!("DST {} statically sized prefix size: {} align: {:?}",
308 ty, sized_size, sized_align);
310 // Recurse to get the size of the dynamically sized field (must be
    // The unsized tail is always the last field of the struct/tuple.
312 let (unsized_size, unsized_align) = match ty.sty {
313 ty::TyAdt(def, substs) => {
314 let last_field = def.struct_variant().fields.last().unwrap();
315 let field_ty = self.field_ty(substs, last_field);
316 self.size_and_align_of_dst(field_ty, value)?
318 ty::TyTuple(ref types, _) => {
319 let field_ty = types.last().unwrap();
320 let field_ty = self.tcx.normalize_associated_type(field_ty);
321 self.size_and_align_of_dst(field_ty, value)?
323 _ => bug!("We already checked that we know this type"),
326 // FIXME (#26403, #27023): We should be adding padding
327 // to `sized_size` (to accommodate the `unsized_align`
328 // required of the unsized field that follows) before
329 // summing it with `sized_size`. (Note that since #26403
330 // is unfixed, we do not yet add the necessary padding
331 // here. But this is where the add would go.)
333 // Return the sum of sizes and max of aligns.
334 let size = sized_size + unsized_size;
336 // Choose max of two known alignments (combined value must
337 // be aligned according to more restrictive of the two).
338 let align = sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap());
340 // Issue #27023: must add any necessary padding to `size`
341 // (to make it a multiple of `align`) before returning it.
343 // Namely, the returned size should be, in C notation:
345 // `size + ((size & (align-1)) ? align : 0)`
347 // emulated via the semi-standard fast bit trick:
349 // `(size + (align-1)) & -align`
351 let size = Size::from_bytes(size).abi_align(align).bytes();
352 Ok((size, align.abi()))
    // Trait objects: size and align live in the vtable.
354 ty::TyDynamic(..) => {
355 let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
356 // the second entry in the vtable is the dynamic size of the object.
357 self.read_size_and_align_from_vtable(vtable)
    // Slices and strs: size is element size times the fat pointer's length.
360 ty::TySlice(_) | ty::TyStr => {
361 let elem_ty = ty.sequence_element_type(self.tcx);
362 let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
363 let (_, len) = value.into_slice(&mut self.memory)?;
364 let align = self.type_align(elem_ty)?;
365 Ok((len * elem_size, align as u64))
368 _ => bug!("size_of_val::<{:?}>", ty),
373 /// Returns the normalized type of a struct field
376 param_substs: &Substs<'tcx>,
379 self.tcx.normalize_associated_type(&f.ty(self.tcx, param_substs))
    /// Size of `ty` in bytes under the current frame's substs; `None` for unsized types.
382 pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
383 self.type_size_with_substs(ty, self.substs())
    /// ABI alignment of `ty` in bytes under the current frame's substs.
386 pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
387 self.type_align_with_substs(ty, self.substs())
390 fn type_size_with_substs(
393 substs: &'tcx Substs<'tcx>,
394 ) -> EvalResult<'tcx, Option<u64>> {
395 let layout = self.type_layout_with_substs(ty, substs)?;
    // Unsized types have no statically known size.
396 if layout.is_unsized() {
399 Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
403 fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> {
404 self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi())
    /// Layout of `ty` under the current frame's substs.
407 pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
408 self.type_layout_with_substs(ty, self.substs())
411 fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
412 // TODO(solson): Is this inefficient? Needs investigation.
413 let ty = self.monomorphize(ty, substs);
415 ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)).map_err(EvalError::Layout)
    /// Pushes a new frame onto the virtual call stack and initializes its locals.
    /// Fails with `StackFrameLimitReached` when the configured stack limit is exceeded.
418 pub fn push_stack_frame(
420 instance: ty::Instance<'tcx>,
422 mir: &'tcx mir::Mir<'tcx>,
423 return_lvalue: Lvalue<'tcx>,
424 return_to_block: StackPopCleanup,
425 ) -> EvalResult<'tcx> {
426 ::log_settings::settings().indentation += 1;
428 /// Return the set of locals that have a storage annotation anywhere
429 fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> {
430 use rustc::mir::StatementKind::*;
432 let mut set = HashSet::new();
433 for block in mir.basic_blocks() {
434 for stmt in block.statements.iter() {
436 StorageLive(mir::Lvalue::Local(local)) | StorageDead(mir::Lvalue::Local(local)) => {
446 // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local
448 let annotated_locals = collect_storage_annotations(mir);
449 let num_locals = mir.local_decls.len() - 1;
450 let mut locals = vec![None; num_locals];
    // Locals without StorageLive/StorageDead annotations are live for the
    // whole function, so they start as Undef rather than dead (None).
451 for i in 0..num_locals {
452 let local = mir::Local::new(i+1);
453 if !annotated_locals.contains(&local) {
454 locals[i] = Some(Value::ByVal(PrimVal::Undef));
458 self.stack.push(Frame {
460 block: mir::START_BLOCK,
    // Tell the memory subsystem which frame accesses now originate from.
469 let cur_frame = self.cur_frame();
470 self.memory.set_cur_frame(cur_frame);
472 if self.stack.len() > self.stack_limit {
473 Err(EvalError::StackFrameLimitReached)
    /// Pops the innermost frame: performs its `StackPopCleanup` (marking statics
    /// immutable/initialized, or jumping to the return block) and deallocates
    /// all stack-backed locals of the popped frame.
479 pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
480 ::log_settings::settings().indentation -= 1;
481 self.memory.locks_lifetime_ended(None);
482 let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
483 if !self.stack.is_empty() {
484 // TODO: IS this the correct time to start considering these accesses as originating from the returned-to stack frame?
485 let cur_frame = self.cur_frame();
486 self.memory.set_cur_frame(cur_frame);
488 match frame.return_to_block {
489 StackPopCleanup::MarkStatic(mutable) => if let Lvalue::Global(id) = frame.return_lvalue {
490 let global_value = self.globals.get_mut(&id)
491 .expect("global should have been cached (static)")ꗃ;
492 match global_value.value {
493 // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
494 Value::ByRef { ptr, aligned: _aligned } =>
495 // Alignment does not matter for this call
496 self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)?,
    // By-value pointers still point at allocations that must inherit the
    // static's mutability.
497 Value::ByVal(val) => if let PrimVal::Ptr(ptr) = val {
498 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
500 Value::ByValPair(val1, val2) => {
501 if let PrimVal::Ptr(ptr) = val1 {
502 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
504 if let PrimVal::Ptr(ptr) = val2 {
505 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
509 // see comment on `initialized` field
510 assert!(!global_value.initialized);
511 global_value.initialized = true;
512 assert_eq!(global_value.mutable, Mutability::Mutable);
513 global_value.mutable = mutable;
515 bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue);
517 StackPopCleanup::Goto(target) => self.goto_block(target),
518 StackPopCleanup::None => {},
520 // deallocate all locals that are backed by an allocation
521 for local in frame.locals {
522 self.deallocate_local(local)?;
    /// Frees the stack allocation backing a `ByRef` local, if any.
    /// Statics are left alone; any other memory kind here is a bug.
528 pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
529 if let Some(Value::ByRef { ptr, aligned: _ }) = local {
530 trace!("deallocating local");
531 let ptr = ptr.to_ptr()?;
532 self.memory.dump_alloc(ptr.alloc_id);
533 match self.memory.get(ptr.alloc_id)?.kind {
534 // for a constant like `const FOO: &i32 = &1;` the local containing
535 // the `1` is referred to by the global. We transitively marked everything
536 // the global refers to as static itself, so we don't free it here
537 MemoryKind::Static => {}
538 MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?,
539 other => bug!("local contained non-stack memory: {:?}", other),
    /// Writes an enum discriminant at its offset within `dest`, then assigns the
    /// variant's fields through a downcast lvalue.
545 pub fn assign_discr_and_fields(
550 operands: &[mir::Operand<'tcx>],
554 ) -> EvalResult<'tcx> {
    // Force the destination into memory so the discriminant can be written at a raw offset.
556 let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
558 let discr_dest = dest_ptr.offset(discr_offset, &self)?;
559 self.memory.write_uint(discr_dest, discr_val, discr_size)?;
561 let dest = Lvalue::Ptr {
562 ptr: dest_ptr.into(),
563 extra: LvalueExtra::DowncastVariant(variant_idx),
567 self.assign_fields(dest, dest_ty, operands)
    /// Evaluates `operands` and writes each into the corresponding field of `dest`.
    /// ZSTs are a no-op; primitive destinations take exactly one operand directly.
570 pub fn assign_fields(
574 operands: &[mir::Operand<'tcx>],
575 ) -> EvalResult<'tcx> {
576 if self.type_size(dest_ty)? == Some(0) {
577 // zst assigning is a nop
580 if self.ty_to_primval_kind(dest_ty).is_ok() {
581 assert_eq!(operands.len(), 1);
582 let value = self.eval_operand(&operands[0])?;
583 let value_ty = self.operand_ty(&operands[0]);
584 return self.write_value(value, dest, value_ty);
586 for (field_index, operand) in operands.iter().enumerate() {
587 let value = self.eval_operand(operand)?;
588 let value_ty = self.operand_ty(operand);
589 let field_dest = self.lvalue_field(dest, field_index, dest_ty, value_ty)?;
590 self.write_value(value, field_dest, value_ty)?;
595 /// Evaluate an assignment statement.
597 /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
598 /// type writes its results directly into the memory specified by the lvalue.
599 pub(super) fn eval_rvalue_into_lvalue(
601 rvalue: &mir::Rvalue<'tcx>,
602 lvalue: &mir::Lvalue<'tcx>,
603 ) -> EvalResult<'tcx> {
604 let dest = self.eval_lvalue(lvalue)?;
605 let dest_ty = self.lvalue_ty(lvalue);
606 let dest_layout = self.type_layout(dest_ty)?;
608 use rustc::mir::Rvalue::*;
    // Plain copy/move of an operand into the destination.
610 Use(ref operand) => {
611 let value = self.eval_operand(operand)?;
612 self.write_value(value, dest, dest_ty)?;
615 BinaryOp(bin_op, ref left, ref right) => {
616 if self.intrinsic_overflowing(bin_op, left, right, dest, dest_ty)? {
617 // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out.
618 // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops
619 // it emits in debug mode) is performance, but it doesn't cost us any performance in miri.
620 // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops,
621 // we have to go back to just ignoring the overflow here.
622 return Err(EvalError::OverflowingMath);
626 CheckedBinaryOp(bin_op, ref left, ref right) => {
627 self.intrinsic_with_overflow(bin_op, left, right, dest, dest_ty)?;
630 UnaryOp(un_op, ref operand) => {
631 let val = self.eval_operand_to_primval(operand)?;
632 let kind = self.ty_to_primval_kind(dest_ty)?;
633 self.write_primval(dest, operator::unary_op(un_op, val, kind)?, dest_ty)?;
636 // Skip everything for zsts
637 Aggregate(..) if self.type_size(dest_ty)? == Some(0) => {}
    // Aggregates dispatch on the destination's layout variant.
639 Aggregate(ref kind, ref operands) => {
640 self.inc_step_counter_and_check_limit(operands.len() as u64)?;
641 use rustc::ty::layout::Layout::*;
643 Univariant { .. } | Array { .. } => {
644 self.assign_fields(dest, dest_ty, operands)?;
647 General { discr, ref variants, .. } => {
648 if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
649 let discr_val = adt_def.discriminants(self.tcx)
651 .expect("broken mir: Adt variant id invalid")
652 .to_u128_unchecked();
653 let discr_size = discr.size().bytes();
655 self.assign_discr_and_fields(
658 variants[variant].offsets[0].bytes(),
665 bug!("tried to assign {:?} to Layout::General", kind);
    // Niche optimization: `Option<&T>`-style enums with a pointer payload.
669 RawNullablePointer { nndiscr, .. } => {
670 if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
671 if nndiscr == variant as u64 {
672 assert_eq!(operands.len(), 1);
673 let operand = &operands[0];
674 let value = self.eval_operand(operand)?;
675 let value_ty = self.operand_ty(operand);
676 self.write_value(value, dest, value_ty)?;
678 if let Some(operand) = operands.get(0) {
679 assert_eq!(operands.len(), 1);
680 let operand_ty = self.operand_ty(operand);
681 assert_eq!(self.type_size(operand_ty)?, Some(0));
    // The "null" variant is represented by writing a null pointer.
683 self.write_null(dest, dest_ty)?;
686 bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
690 StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
691 if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
692 if nndiscr == variant as u64 {
693 self.assign_fields(dest, dest_ty, operands)?;
695 for operand in operands {
696 let operand_ty = self.operand_ty(operand);
697 assert_eq!(self.type_size(operand_ty)?, Some(0));
699 let (offset, TyAndPacked { ty, packed: _}) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
700 // TODO: The packed flag is ignored
    // Zero the discriminant field embedded in the non-null variant's struct.
703 let dest = self.force_allocation(dest)?.to_ptr()?;
705 let dest = dest.offset(offset.bytes(), &self)?;
706 let dest_size = self.type_size(ty)?
707 .expect("bad StructWrappedNullablePointer discrfield");
708 self.memory.write_int(dest, 0, dest_size)?;
711 bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
716 assert_eq!(operands.len(), 0);
717 if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
718 let n = adt_def.discriminants(self.tcx)
720 .expect("broken mir: Adt variant index invalid")
721 .to_u128_unchecked();
722 self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?;
724 bug!("tried to assign {:?} to Layout::CEnum", kind);
728 Vector { count, .. } => {
729 debug_assert_eq!(count, operands.len() as u64);
730 self.assign_fields(dest, dest_ty, operands)?;
733 UntaggedUnion { .. } => {
734 assert_eq!(operands.len(), 1);
735 let operand = &operands[0];
736 let value = self.eval_operand(operand)?;
737 let value_ty = self.operand_ty(operand);
738 self.write_value(value, dest, value_ty)?;
742 return Err(EvalError::Unimplemented(format!(
743 "can't handle destination layout {:?} when assigning {:?}",
    // `[x; N]`: evaluate once, then write the same value into each element slot.
751 Repeat(ref operand, _) => {
752 let (elem_ty, length) = match dest_ty.sty {
753 ty::TyArray(elem_ty, n) => (elem_ty, n as u64),
754 _ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty),
756 self.inc_step_counter_and_check_limit(length)?;
757 let elem_size = self.type_size(elem_ty)?
758 .expect("repeat element type must be sized");
759 let value = self.eval_operand(operand)?;
762 let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
765 let elem_dest = dest.offset(i * elem_size, &self)?;
766 self.write_value_to_ptr(value, elem_dest, elem_ty)?;
771 // FIXME(CTFE): don't allow computing the length of arrays in const eval
772 let src = self.eval_lvalue(lvalue)?;
773 let ty = self.lvalue_ty(lvalue);
774 let (_, len) = src.elem_ty_and_len(ty);
775 self.write_primval(dest, PrimVal::from_u128(len as u128), dest_ty)?;
778 Ref(_, _, ref lvalue) => {
779 let src = self.eval_lvalue(lvalue)?;
780 // We ignore the alignment of the lvalue here -- special handling for packed structs ends
781 // at the `&` operator.
782 let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned();
    // Fat pointers carry their extra data (length or vtable) alongside the address.
784 let val = match extra {
785 LvalueExtra::None => ptr.to_value(),
786 LvalueExtra::Length(len) => ptr.to_value_with_len(len),
787 LvalueExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
788 LvalueExtra::DowncastVariant(..) =>
789 bug!("attempted to take a reference to an enum downcast lvalue"),
791 self.write_value(val, dest, dest_ty)?;
794 NullaryOp(mir::NullOp::Box, ty) => {
795 let ptr = M::box_alloc(self, ty)?;
796 self.write_primval(dest, ptr, dest_ty)?;
799 NullaryOp(mir::NullOp::SizeOf, ty) => {
800 let size = self.type_size(ty)?.expect("SizeOf nullary MIR operator called for unsized type");
801 self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?;
804 Cast(kind, ref operand, cast_ty) => {
805 debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
806 use rustc::mir::CastKind::*;
809 let src = self.eval_operand(operand)?;
810 let src_ty = self.operand_ty(operand);
811 self.unsize_into(src, src_ty, dest, dest_ty)?;
815 let src = self.eval_operand(operand)?;
816 let src_ty = self.operand_ty(operand);
817 if self.type_is_fat_ptr(src_ty) {
818 match (src, self.type_is_fat_ptr(dest_ty)) {
819 (Value::ByRef{..}, _) |
820 (Value::ByValPair(..), true) => {
821 self.write_value(src, dest, dest_ty)?;
    // Fat -> thin pointer cast: keep only the data pointer, drop the extra.
823 (Value::ByValPair(data, _), false) => {
824 self.write_value(Value::ByVal(data), dest, dest_ty)?;
826 (Value::ByVal(_), _) => bug!("expected fat ptr"),
829 let src_val = self.value_to_primval(src, src_ty)?;
830 let dest_val = self.cast_primval(src_val, src_ty, dest_ty)?;
831 self.write_value(Value::ByVal(dest_val), dest, dest_ty)?;
835 ReifyFnPointer => match self.operand_ty(operand).sty {
836 ty::TyFnDef(def_id, substs) => {
837 let instance = resolve(self.tcx, def_id, substs);
838 let fn_ptr = self.memory.create_fn_alloc(instance);
839 self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
841 ref other => bug!("reify fn pointer on {:?}", other),
844 UnsafeFnPointer => match dest_ty.sty {
846 let src = self.eval_operand(operand)?;
847 self.write_value(src, dest, dest_ty)?;
849 ref other => bug!("fn to unsafe fn cast on {:?}", other),
852 ClosureFnPointer => match self.operand_ty(operand).sty {
853 ty::TyClosure(def_id, substs) => {
854 let instance = resolve_closure(self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
855 let fn_ptr = self.memory.create_fn_alloc(instance);
856 self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
858 ref other => bug!("closure fn pointer on {:?}", other),
    // Reading a discriminant also validates it against the ADT's known variants.
863 Discriminant(ref lvalue) => {
864 let lval = self.eval_lvalue(lvalue)?;
865 let ty = self.lvalue_ty(lvalue);
866 let ptr = self.force_allocation(lval)?.to_ptr()?;
867 let discr_val = self.read_discriminant_value(ptr, ty)?;
868 if let ty::TyAdt(adt_def, _) = ty.sty {
869 if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) {
870 return Err(EvalError::InvalidDiscriminant);
873 bug!("rustc only generates Rvalue::Discriminant for enums");
875 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
879 if log_enabled!(::log::LogLevel::Trace) {
880 self.dump_local(dest);
    /// Whether `ty` is a pointer type whose pointee is unsized (raw ptr, ref, or Box).
886 pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
888 ty::TyRawPtr(ref tam) |
889 ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
890 ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
    /// For a `StructWrappedNullablePointer` enum, resolves the `discrfield` path to
    /// the offset and type of the field that serves as the discriminant.
895 pub(super) fn nonnull_offset_and_ty(
900 ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
901 // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
902 let path = discrfield.iter().skip(2).map(|&i| i as usize);
904 // Handle the field index for the outer non-null variant.
905 let (inner_offset, inner_ty) = match ty.sty {
906 ty::TyAdt(adt_def, substs) => {
907 let variant = &adt_def.variants[nndiscr as usize];
908 let index = discrfield[1];
909 let field = &variant.fields[index as usize];
910 (self.get_field_offset(ty, index as usize)?, field.ty(self.tcx, substs))
912 _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
    // Walk the remaining path inside the variant's field.
915 self.field_path_offset_and_ty(inner_offset, inner_ty, path)
    /// Follows a path of field indices, accumulating byte offsets and tracking
    /// whether any step passed through a packed struct.
918 fn field_path_offset_and_ty<I: Iterator<Item = usize>>(
923 ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
924 // Skip the initial 0 intended for LLVM GEP.
925 let mut packed = false;
926 for field_index in path {
927 let field_offset = self.get_field_offset(ty, field_index)?;
928 trace!("field_path_offset_and_ty: {}, {}, {:?}, {:?}", field_index, ty, field_offset, offset);
929 let field_ty = self.get_field_ty(ty, field_index)?;
    // Packedness is sticky: once any ancestor is packed, the result is packed.
931 packed = packed || field_ty.packed;
932 offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
935 Ok((offset, TyAndPacked { ty, packed }))
    /// Field types of a fat pointer: field 0 is the data pointer,
    /// field 1 is the length (slices) or vtable pointer (trait objects).
937 fn get_fat_field(&self, pointee_ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> {
938 match (field_index, &self.tcx.struct_tail(pointee_ty).sty) {
940 (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize),
941 (1, &ty::TyDynamic(..)) |
942 (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)),
943 _ => bug!("invalid fat pointee type: {}", pointee_ty),
947 /// Returns the field type and whether the field is packed
948 pub fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
    // Box is treated like a fat pointer over its boxed type.
950 ty::TyAdt(adt_def, _) if adt_def.is_box() =>
951 Ok(TyAndPacked { ty: self.get_fat_field(ty.boxed_ty(), field_index)?, packed: false }),
952 ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
953 use rustc::ty::layout::Layout::*;
954 match *self.type_layout(ty)? {
955 RawNullablePointer { nndiscr, .. } =>
956 Ok(TyAndPacked { ty: adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs), packed: false }),
957 StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
958 let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs);
959 Ok(TyAndPacked { ty, packed: nonnull.packed })
961 _ => Err(EvalError::Unimplemented(format!("get_field_ty can't handle enum type: {:?}, {:?}", ty, ty.sty))),
964 ty::TyAdt(adt_def, substs) => {
965 let variant_def = adt_def.struct_variant();
966 use rustc::ty::layout::Layout::*;
967 match *self.type_layout(ty)? {
968 UntaggedUnion { ref variants } =>
969 Ok(TyAndPacked { ty: variant_def.fields[field_index].ty(self.tcx, substs), packed: variants.packed }),
970 Univariant { ref variant, .. } =>
971 Ok(TyAndPacked { ty: variant_def.fields[field_index].ty(self.tcx, substs), packed: variant.packed }),
972 _ => Err(EvalError::Unimplemented(format!("get_field_ty can't handle struct type: {:?}, {:?}", ty, ty.sty))),
976 ty::TyTuple(fields, _) => Ok(TyAndPacked { ty: fields[field_index], packed: false }),
978 ty::TyRef(_, ref tam) |
979 ty::TyRawPtr(ref tam) => Ok(TyAndPacked { ty: self.get_fat_field(tam.ty, field_index)?, packed: false }),
981 ty::TyArray(ref inner, _) => Ok(TyAndPacked { ty: inner, packed: false }),
983 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, {:?}", ty, ty.sty))),
    /// Byte offset of field `field_index` within `ty`, derived from its layout.
987 fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
988 // Also see lvalue_field in lvalue.rs, which handles more cases but needs an actual value at the given type
989 let layout = self.type_layout(ty)?;
991 use rustc::ty::layout::Layout::*;
993 Univariant { ref variant, .. } => {
994 Ok(variant.offsets[field_index])
996 FatPointer { .. } => {
    // Fat pointer fields are pointer-sized and consecutive.
997 let bytes = field_index as u64 * self.memory.pointer_size();
998 Ok(Size::from_bytes(bytes))
1000 StructWrappedNullablePointer { ref nonnull, .. } => {
1001 Ok(nonnull.offsets[field_index])
    // All union fields start at offset 0.
1003 UntaggedUnion { .. } => Ok(Size::from_bytes(0)),
1005 let msg = format!("get_field_offset: can't handle type: {:?}, with layout: {:?}", ty, layout);
1006 Err(EvalError::Unimplemented(msg))
    /// Number of fields (or elements) of `ty`, derived from its layout.
1011 pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
1012 let layout = self.type_layout(ty)?;
1014 use rustc::ty::layout::Layout::*;
1016 Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64),
1017 FatPointer { .. } => Ok(2),
1018 StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64),
1019 Vector { count , .. } |
1020 Array { count, .. } => Ok(count),
1021 Scalar { .. } => Ok(0),
1022 UntaggedUnion { .. } => Ok(1),
1024 let msg = format!("get_field_count: can't handle type: {:?}, with layout: {:?}", ty, layout);
1025 Err(EvalError::Unimplemented(msg))
// Convenience wrapper: evaluates a MIR operand and immediately narrows the
// resulting `Value` down to a single `PrimVal` (fails for fat pointers / ByRef-only data).
1030 pub(super) fn eval_operand_to_primval(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, PrimVal> {
1031 let value = self.eval_operand(op)?;
1032 let ty = self.operand_ty(op);
1033 self.value_to_primval(value, ty)
// Evaluates a MIR operand to a `Value`. Lvalue consumption reads through the
// lvalue machinery; constants are either inline literals, precomputed
// statics/consts, or precomputed promoteds — the latter two are looked up in
// `self.globals` and must already be cached (panic otherwise, by construction
// of the evaluation order).
1036 pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
1037 use rustc::mir::Operand::*;
1039 Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
1041 Constant(ref constant) => {
1042 use rustc::mir::Literal;
1043 let mir::Constant { ref literal, .. } = **constant;
1044 let value = match *literal {
1045 Literal::Value { ref value } => self.const_to_value(value)?,
// Associated consts need resolution against the current substs first.
1047 Literal::Item { def_id, substs } => {
1048 let instance = self.resolve_associated_const(def_id, substs);
1049 let cid = GlobalId { instance, promoted: None };
1050 self.globals.get(&cid).expect("static/const not cached").value
// Promoteds are keyed by (current frame's instance, promoted index).
1053 Literal::Promoted { index } => {
1054 let cid = GlobalId {
1055 instance: self.frame().instance,
1056 promoted: Some(index),
1058 self.globals.get(&cid).expect("promoted not cached").value
// Type of a MIR operand in the current frame, monomorphized with the frame's substs.
1067 pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
1068 self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
// Memcpy-style copy of one value of type `ty` from `src` to `dest`.
// `ty` must be sized (panics otherwise); the `false` flag is forwarded to
// `Memory::copy` (non-overlapping copy — TODO confirm flag meaning at call site).
1071 fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
1072 let size = self.type_size(ty)?.expect("cannot copy from an unsized type");
1073 let align = self.type_align(ty)?;
1074 self.memory.copy(src, dest, size, align, false)?;
// Ensures the given lvalue is backed by real memory (`Lvalue::Ptr`): locals and
// globals that currently live as immediate `Value`s are spilled into a fresh
// allocation and the stored value is rewritten to point at it. Already
// pointer-backed lvalues are returned unchanged.
1078 pub fn force_allocation(
1080 lvalue: Lvalue<'tcx>,
1081 ) -> EvalResult<'tcx, Lvalue<'tcx>> {
1082 let new_lvalue = match lvalue {
1083 Lvalue::Local { frame, local } => {
1084 // -1 since we don't store the return value
1085 match self.stack[frame].locals[local.index() - 1] {
1086 None => return Err(EvalError::DeadLocal),
// Already in memory: just wrap the pointer.
1087 Some(Value::ByRef { ptr, aligned }) => {
1088 Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None }
// Immediate value: allocate, record the new ByRef so the local stays live,
// then write the old value into the allocation.
1091 let ty = self.stack[frame].mir.local_decls[local].ty;
1092 let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
1093 let substs = self.stack[frame].instance.substs;
1094 let ptr = self.alloc_ptr_with_substs(ty, substs)?;
1095 self.stack[frame].locals[local.index() - 1] = Some(Value::by_ref(ptr.into())); // it stays live
1096 self.write_value_to_ptr(val, ptr.into(), ty)?;
1097 Lvalue::from_ptr(ptr)
1101 Lvalue::Ptr { .. } => lvalue,
1102 Lvalue::Global(cid) => {
1103 let global_val = self.globals.get(&cid).expect("global not cached").clone();
1104 match global_val.value {
1105 Value::ByRef { ptr, aligned } =>
1106 Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None },
// Spill the global into static memory; mark it initialized if it already was.
1108 let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?;
1109 self.memory.mark_static(ptr.alloc_id);
1110 self.write_value_to_ptr(global_val.value, ptr.into(), global_val.ty)?;
1111 // see comment on `initialized` field
1112 if global_val.initialized {
1113 self.memory.mark_static_initalized(ptr.alloc_id, global_val.mutable)?;
1115 let lval = self.globals.get_mut(&cid).expect("already checked");
1117 value: Value::by_ref(ptr.into()),
1120 Lvalue::from_ptr(ptr)
1128 /// ensures this Value is not a ByRef
1129 pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
// Dereference a memory-backed value by reading it back at type `ty`,
// respecting the recorded alignment guarantee.
1131 Value::ByRef { ptr, aligned } => {
1132 self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
// Narrows a `Value` to a single `PrimVal`, validating it for `ty` (e.g. bool/char
// range checks). Fat pointers (`ByValPair`) are a caller bug here.
1138 pub fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
1139 match self.follow_by_ref_value(value, ty)? {
1140 Value::ByRef{..} => bug!("follow_by_ref_value can't result in `ByRef`"),
1142 Value::ByVal(primval) => {
1143 self.ensure_valid_value(primval, ty)?;
1147 Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
1155 ) -> EvalResult<'tcx> {
1156 self.write_primval(dest, PrimVal::Bytes(0), dest_ty)
1164 ) -> EvalResult<'tcx> {
1165 self.write_value(val.to_value(), dest, dest_ty)
// Writes a single primitive value to `dest` by wrapping it as `Value::ByVal`.
1168 pub fn write_primval(
1173 ) -> EvalResult<'tcx> {
1174 self.write_value(Value::ByVal(val), dest, dest_ty)
1182 ) -> EvalResult<'tcx> {
1183 //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
1184 // Note that it is really important that the type here is the right one, and matches the type things are read at.
1185 // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
1186 // correct if we never look at this data with the wrong type.
1189 Lvalue::Global(cid) => {
1190 let dest = self.globals.get_mut(&cid).expect("global should be cached").clone();
1191 if dest.mutable == Mutability::Immutable {
1192 return Err(EvalError::ModifiedConstantMemory);
1194 let write_dest = |this: &mut Self, val| {
1195 *this.globals.get_mut(&cid).expect("already checked") = Global {
1201 self.write_value_possibly_by_val(src_val, write_dest, dest.value, dest_ty)
1204 Lvalue::Ptr { ptr, extra, aligned } => {
1205 assert_eq!(extra, LvalueExtra::None);
1206 self.write_maybe_aligned_mut(aligned,
1207 |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
1210 Lvalue::Local { frame, local } => {
1211 let dest = self.stack[frame].get_local(local)?;
1212 self.write_value_possibly_by_val(
1214 |this, val| this.stack[frame].set_local(local, val),
1222 // The cases here can be a bit subtle. Read carefully!
// Core write helper shared by locals and globals: decides whether the write
// must go through memory (destination already ByRef), must duplicate an
// allocation (source ByRef, destination not), or can be a plain value store.
// `write_dest` is the callback that stores a non-ByRef `Value` at the destination.
1223 fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
1227 old_dest_val: Value,
1229 ) -> EvalResult<'tcx> {
1230 if let Value::ByRef { ptr: dest_ptr, aligned } = old_dest_val {
1231 // If the value is already `ByRef` (that is, backed by an `Allocation`),
1232 // then we must write the new value into this allocation, because there may be
1233 // other pointers into the allocation. These other pointers are logically
1234 // pointers into the local variable, and must be able to observe the change.
1236 // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
1237 // knew for certain that there were no outstanding pointers to this allocation.
1238 self.write_maybe_aligned_mut(aligned,
1239 |ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?;
1241 } else if let Value::ByRef { ptr: src_ptr, aligned } = src_val {
1242 // If the value is not `ByRef`, then we know there are no pointers to it
1243 // and we can simply overwrite the `Value` in the locals array directly.
1245 // In this specific case, where the source value is `ByRef`, we must duplicate
1246 // the allocation, because this is a by-value operation. It would be incorrect
1247 // if they referred to the same allocation, since then a change to one would
1248 // implicitly change the other.
1250 // It is a valid optimization to attempt reading a primitive value out of the
1251 // source and write that into the destination without making an allocation, so
1253 self.read_maybe_aligned_mut(aligned, |ectx| {
1254 if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
1255 write_dest(ectx, src_val)?;
// Not a primitive: clone the allocation and store a ByRef to the copy.
1257 let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
1258 ectx.copy(src_ptr, dest_ptr, dest_ty)?;
1259 write_dest(ectx, Value::by_ref(dest_ptr))?;
1265 // Finally, we have the simple case where neither source nor destination are
1266 // `ByRef`. We may simply copy the source value over the the destintion.
1267 write_dest(self, src_val)?;
// Writes any `Value` into raw memory at `dest`: ByRef copies the backing
// allocation, ByVal stores one primitive, ByValPair delegates to the
// two-field pair writer.
1272 pub fn write_value_to_ptr(
1277 ) -> EvalResult<'tcx> {
1279 Value::ByRef { ptr, aligned } => {
1280 self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
1282 Value::ByVal(primval) => {
1283 let size = self.type_size(dest_ty)?.expect("dest type must be sized");
1284 self.memory.write_primval(dest, primval, size)
1286 Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
// Writes the two halves of a `ByValPair` to memory. First peels off any
// single-field wrapper types (newtype structs) to reach the actual two-field
// layout, tracking `packed`-ness along the way; then writes each half at its
// computed field offset, using unaligned accesses for packed types.
1290 pub fn write_pair_to_ptr(
1296 ) -> EvalResult<'tcx> {
1297 let mut packed = false;
// Drill through newtype wrappers until `ty` itself has two fields.
1298 while self.get_field_count(ty)? == 1 {
1299 let field = self.get_field_ty(ty, 0)?;
1301 packed = packed || field.packed;
1303 assert_eq!(self.get_field_count(ty)?, 2);
1304 let field_0 = self.get_field_offset(ty, 0)?;
1305 let field_1 = self.get_field_offset(ty, 1)?;
1306 let field_0_ty = self.get_field_ty(ty, 0)?;
1307 let field_1_ty = self.get_field_ty(ty, 1)?;
1308 assert_eq!(field_0_ty.packed, field_1_ty.packed, "the two fields must agree on being packed");
1309 packed = packed || field_0_ty.packed;
1310 let field_0_size = self.type_size(field_0_ty.ty)?.expect("pair element type must be sized");
1311 let field_1_size = self.type_size(field_1_ty.ty)?.expect("pair element type must be sized");
1312 let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into();
1313 let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into();
// `!packed` == "may assume alignment": packed fields must be written unaligned.
1314 self.write_maybe_aligned_mut(!packed,
1315 |ectx| ectx.memory.write_primval(field_0_ptr, a, field_0_size))?;
1316 self.write_maybe_aligned_mut(!packed,
1317 |ectx| ectx.memory.write_primval(field_1_ptr, b, field_1_size))?;
// Maps a type to the `PrimValKind` used to interpret its bytes, for types that
// are representable as a single primitive. Non-primitive types (aggregates with
// more than one field, unsized data, etc.) yield `EvalError::TypeNotPrimitive`.
1321 pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
1322 use syntax::ast::FloatTy;
1324 let kind = match ty.sty {
1325 ty::TyBool => PrimValKind::Bool,
1326 ty::TyChar => PrimValKind::Char,
1328 ty::TyInt(int_ty) => {
1329 use syntax::ast::IntTy::*;
1330 let size = match int_ty {
// `isize` is pointer-sized on the target.
1336 Is => self.memory.pointer_size(),
1338 PrimValKind::from_int_size(size)
1341 ty::TyUint(uint_ty) => {
1342 use syntax::ast::UintTy::*;
1343 let size = match uint_ty {
1349 Us => self.memory.pointer_size(),
1351 PrimValKind::from_uint_size(size)
1354 ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
1355 ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
1357 ty::TyFnPtr(_) => PrimValKind::FnPtr,
// Thin references/raw pointers only; fat pointers are not a single primitive.
1359 ty::TyRef(_, ref tam) |
1360 ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
1362 ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
// ADTs: decided by layout, not by definition.
1364 ty::TyAdt(def, substs) => {
1365 use rustc::ty::layout::Layout::*;
1366 match *self.type_layout(ty)? {
1367 CEnum { discr, signed, .. } => {
1368 let size = discr.size().bytes();
1370 PrimValKind::from_int_size(size)
1372 PrimValKind::from_uint_size(size)
1376 RawNullablePointer { value, .. } => {
1377 use rustc::ty::layout::Primitive::*;
1379 // TODO(solson): Does signedness matter here? What should the sign be?
1380 Int(int) => PrimValKind::from_uint_size(int.size().bytes()),
1381 F32 => PrimValKind::F32,
1382 F64 => PrimValKind::F64,
1383 Pointer => PrimValKind::Ptr,
1387 // represent single field structs as their single field
1388 Univariant { .. } => {
1389 // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums
1390 let variant = &def.variants[0];
1391 // FIXME: also allow structs with only a single non zst field
1392 if variant.fields.len() == 1 {
1393 return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs));
1395 return Err(EvalError::TypeNotPrimitive(ty));
1399 _ => return Err(EvalError::TypeNotPrimitive(ty)),
1403 _ => return Err(EvalError::TypeNotPrimitive(ty)),
// Validity checks for primitives with restricted bit patterns:
// bools must be 0/1, chars must be valid Unicode scalar values.
1409 fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
1411 ty::TyBool if val.to_bytes()? > 1 => Err(EvalError::InvalidBool),
1413 ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none()
1414 => Err(EvalError::InvalidChar(val.to_bytes()? as u32 as u128)),
// Like `try_read_value`, but the caller asserts the type is primitive-readable;
// a `None` result is an internal bug.
1420 pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
1421 if let Some(val) = self.try_read_value(ptr, ty)? {
1424 bug!("primitive read failed for type: {:?}", ty);
// Reads a (possibly fat) pointer to `pointee_ty` from memory at `ptr`.
// Sized pointees yield a thin pointer; unsized ones read the extra word
// (vtable for trait objects, length for slices/str) right after the data pointer.
1428 pub(crate) fn read_ptr(&self, ptr: MemoryPointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
1429 let p = self.memory.read_ptr(ptr)?;
1430 if self.type_is_sized(pointee_ty) {
1433 trace!("reading fat pointer extra of type {}", pointee_ty);
1434 let extra = ptr.offset(self.memory.pointer_size(), self)?;
1435 match self.tcx.struct_tail(pointee_ty).sty {
1436 ty::TyDynamic(..) => Ok(p.to_value_with_vtable(self.memory.read_ptr(extra)?.to_ptr()?)),
1438 ty::TyStr => Ok(p.to_value_with_len(self.memory.read_usize(extra)?)),
1440 _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
// Attempts to read a primitive `Value` of type `ty` from memory. Returns
// `Ok(None)` for types that are not primitive-readable (caller then keeps the
// data ByRef). Pointer-sized integers get a fallback path: if the bytes turn
// out to be a pointer, the pointer is returned instead of failing.
1444 fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
1445 use syntax::ast::FloatTy;
1447 let val = match ty.sty {
1448 ty::TyBool => PrimVal::from_bool(self.memory.read_bool(ptr.to_ptr()?)?),
1450 let c = self.memory.read_uint(ptr.to_ptr()?, 4)? as u32;
1451 match ::std::char::from_u32(c) {
1452 Some(ch) => PrimVal::from_char(ch),
1453 None => return Err(EvalError::InvalidChar(c as u128)),
1457 ty::TyInt(int_ty) => {
1458 use syntax::ast::IntTy::*;
1459 let size = match int_ty {
1465 Is => self.memory.pointer_size(),
1467 // if we transmute a ptr to an isize, reading it back into a primval shouldn't panic
1468 // Due to read_ptr ignoring the sign, we need to jump around some hoops
1469 match self.memory.read_int(ptr.to_ptr()?, size) {
1470 Err(EvalError::ReadPointerAsBytes) if size == self.memory.pointer_size() =>
1471 // Reading as an int failed because we are seeing ptr bytes *and* we are actually reading at ptr size.
1472 // Let's try again, reading a ptr this time.
1473 self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
1474 other => PrimVal::from_i128(other?),
1478 ty::TyUint(uint_ty) => {
1479 use syntax::ast::UintTy::*;
1480 let size = match uint_ty {
1486 Us => self.memory.pointer_size(),
1488 // if we transmute a ptr to an usize, reading it back into a primval shouldn't panic
1489 // for consistency's sake, we use the same code as above
1490 match self.memory.read_uint(ptr.to_ptr()?, size) {
1491 Err(EvalError::ReadPointerAsBytes) if size == self.memory.pointer_size() => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
1492 other => PrimVal::from_u128(other?),
1496 ty::TyFloat(FloatTy::F32) => PrimVal::from_f32(self.memory.read_f32(ptr.to_ptr()?)?),
1497 ty::TyFloat(FloatTy::F64) => PrimVal::from_f64(self.memory.read_f64(ptr.to_ptr()?)?),
1499 ty::TyFnPtr(_) => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
// References and raw pointers delegate to `read_ptr` (handles fat pointers).
1500 ty::TyRef(_, ref tam) |
1501 ty::TyRawPtr(ref tam) => return self.read_ptr(ptr.to_ptr()?, tam.ty).map(Some),
1503 ty::TyAdt(def, _) => {
// Box is pointer-like; C-like enums read their discriminant as an int.
1505 return self.read_ptr(ptr.to_ptr()?, ty.boxed_ty()).map(Some);
1507 use rustc::ty::layout::Layout::*;
1508 if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
1509 let size = discr.size().bytes();
1511 PrimVal::from_i128(self.memory.read_int(ptr.to_ptr()?, size)?)
1513 PrimVal::from_u128(self.memory.read_uint(ptr.to_ptr()?, size)?)
// Not a primitive-readable type.
1520 _ => return Ok(None),
1523 Ok(Some(Value::ByVal(val)))
// Current (innermost) stack frame; panics if the call stack is empty.
1526 pub fn frame(&self) -> &Frame<'tcx> {
1527 self.stack.last().expect("no call frames exist")
// Mutable access to the current stack frame; panics if the call stack is empty.
1530 pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
1531 self.stack.last_mut().expect("no call frames exist")
// MIR of the function executing in the current frame (body elided in this view).
1534 pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
// Substitutions of the instance executing in the current frame.
1538 pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
1539 self.frame().instance.substs
1550 ) -> EvalResult<'tcx> {
1551 // A<Struct> -> A<Trait> conversion
1552 let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
1554 match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
1555 (&ty::TyArray(_, length), &ty::TySlice(_)) => {
1556 let ptr = src.into_ptr(&self.memory)?;
1557 // u64 cast is from usize to u64, which is always good
1558 self.write_value(ptr.to_value_with_len(length as u64), dest, dest_ty)
1560 (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
1561 // For now, upcasts are limited to changes in marker
1562 // traits, and hence never actually require an actual
1563 // change to the vtable.
1564 self.write_value(src, dest, dest_ty)
1566 (_, &ty::TyDynamic(ref data, _)) => {
1567 let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty);
1568 let trait_ref = self.tcx.erase_regions(&trait_ref);
1569 let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
1570 let ptr = src.into_ptr(&self.memory)?;
1571 self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty)
1574 _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
1584 ) -> EvalResult<'tcx> {
1585 match (&src_ty.sty, &dest_ty.sty) {
1586 (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
1587 (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
1588 (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty),
1589 (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
1590 if def_a.is_box() || def_b.is_box() {
1591 if !def_a.is_box() || !def_b.is_box() {
1592 panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
1594 return self.unsize_into_ptr(src, src_ty, dest, dest_ty, src_ty.boxed_ty(), dest_ty.boxed_ty());
1596 if self.ty_to_primval_kind(src_ty).is_ok() {
1597 // TODO: We ignore the packed flag here
1598 let sty = self.get_field_ty(src_ty, 0)?.ty;
1599 let dty = self.get_field_ty(dest_ty, 0)?.ty;
1600 return self.unsize_into(src, sty, dest, dty);
1602 // unsizing of generic struct with pointer fields
1603 // Example: `Arc<T>` -> `Arc<Trait>`
1604 // here we need to increase the size of every &T thin ptr field to a fat ptr
1606 assert_eq!(def_a, def_b);
1608 let src_fields = def_a.variants[0].fields.iter();
1609 let dst_fields = def_b.variants[0].fields.iter();
1611 //let src = adt::MaybeSizedValue::sized(src);
1612 //let dst = adt::MaybeSizedValue::sized(dst);
1613 let src_ptr = match src {
1614 Value::ByRef { ptr, aligned: true } => ptr,
1615 // TODO: Is it possible for unaligned pointers to occur here?
1616 _ => bug!("expected aligned pointer, got {:?}", src),
1620 let dest = self.force_allocation(dest)?.to_ptr()?;
1621 let iter = src_fields.zip(dst_fields).enumerate();
1622 for (i, (src_f, dst_f)) in iter {
1623 let src_fty = self.field_ty(substs_a, src_f);
1624 let dst_fty = self.field_ty(substs_b, dst_f);
1625 if self.type_size(dst_fty)? == Some(0) {
1628 let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
1629 let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
1630 let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
1631 let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
1632 if src_fty == dst_fty {
1633 self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
1635 self.unsize_into(Value::by_ref(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
1640 _ => bug!("unsize_into: invalid conversion: {:?} -> {:?}", src_ty, dest_ty),
// Debug helper: prints a human-readable description of a local's current value
// and then dumps every allocation the value (transitively, one level) points at.
// Non-local lvalues are ignored.
1644 pub fn dump_local(&self, lvalue: Lvalue<'tcx>) {
1646 if let Lvalue::Local { frame, local } = lvalue {
1647 let mut allocs = Vec::new();
1648 let mut msg = format!("{:?}", local);
1649 if frame != self.cur_frame() {
1650 write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
1652 write!(msg, ":").unwrap();
1654 match self.stack[frame].get_local(local) {
1655 Err(EvalError::DeadLocal) => {
1656 write!(msg, " is dead").unwrap();
1659 panic!("Failed to access local: {:?}", err);
// Collect alloc ids from any pointers inside the value for the memory dump below.
1661 Ok(Value::ByRef { ptr, aligned }) => match ptr.into_inner_primval() {
1662 PrimVal::Ptr(ptr) => {
1663 write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap();
1664 allocs.push(ptr.alloc_id);
1666 ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
1668 Ok(Value::ByVal(val)) => {
1669 write!(msg, " {:?}", val).unwrap();
1670 if let PrimVal::Ptr(ptr) = val { allocs.push(ptr.alloc_id); }
1672 Ok(Value::ByValPair(val1, val2)) => {
1673 write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
1674 if let PrimVal::Ptr(ptr) = val1 { allocs.push(ptr.alloc_id); }
1675 if let PrimVal::Ptr(ptr) = val2 { allocs.push(ptr.alloc_id); }
1680 self.memory.dump_allocs(allocs);
1684 /// Convenience function to ensure correct usage of globals and code-sharing with locals.
// Applies `f` to the cached global's value and writes the result back.
// Immutable globals are rejected with `ModifiedConstantMemory`.
1685 pub fn modify_global<F>(&mut self, cid: GlobalId<'tcx>, f: F) -> EvalResult<'tcx>
1686 where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
1688 let mut val = self.globals.get(&cid).expect("global not cached").clone();
1689 if val.mutable == Mutability::Immutable {
1690 return Err(EvalError::ModifiedConstantMemory);
1692 val.value = f(self, val.value)?;
1693 *self.globals.get_mut(&cid).expect("already checked") = val;
1697 /// Convenience function to ensure correct usage of locals and code-sharing with globals.
// Applies `f` to a frame-local's current value and stores the result back;
// fails with `DeadLocal` if the local has no storage.
1698 pub fn modify_local<F>(
1703 ) -> EvalResult<'tcx>
1704 where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
1706 let val = self.stack[frame].get_local(local)?;
1707 let new_val = f(self, val)?;
1708 self.stack[frame].set_local(local, new_val)?;
1709 // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
1710 // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
1711 // self.memory.deallocate(ptr)?;
// Reports an evaluation error through rustc's diagnostics: points at the
// currently executing statement/terminator, then annotates the whole call
// stack (closures get a special message). With no frames, falls back to a
// plain session error.
1716 pub fn report(&self, e: &EvalError) {
1717 if let Some(frame) = self.stack().last() {
1718 let block = &frame.mir.basic_blocks()[frame.block];
// `stmt` past the statement list means we are at the block terminator.
1719 let span = if frame.stmt < block.statements.len() {
1720 block.statements[frame.stmt].source_info.span
1722 block.terminator().source_info.span
1724 let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
1725 for &Frame { instance, span, .. } in self.stack().iter().rev() {
1726 if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
1727 err.span_note(span, "inside call to closure");
1730 err.span_note(span, &format!("inside call to {}", instance));
1734 self.tcx.sess.err(&e.to_string());
// Local-slot accessors for a stack frame. The locals array does not contain a
// slot for the return pointer (local 0), hence the pervasive `index() - 1`;
// a `None` slot means the local's storage is dead.
1739 impl<'tcx> Frame<'tcx> {
1740 pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
1741 // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
1742 self.locals[local.index() - 1].ok_or(EvalError::DeadLocal)
1745 fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
1746 // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
1747 match self.locals[local.index() - 1] {
1748 None => Err(EvalError::DeadLocal),
1749 Some(ref mut local) => {
// Returns the previous value of the local (per the convention documented on storage_dead).
1756 pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
1757 trace!("{:?} is now live", local);
1759 let old = self.locals[local.index() - 1];
1760 self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
1764 /// Returns the old value of the local
1765 pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
1766 trace!("{:?} is now dead", local);
1768 let old = self.locals[local.index() - 1];
1769 self.locals[local.index() - 1] = None;
1774 // TODO(solson): Upstream these methods into rustc::ty::layout.
// Extension trait giving layout integers a byte-size accessor.
1776 pub(super) trait IntegerExt {
1777 fn size(self) -> Size;
1780 impl IntegerExt for layout::Integer {
1781 fn size(self) -> Size {
1782 use rustc::ty::layout::Integer::*;
// I1 (single bit) is stored in a full byte.
1784 I1 | I8 => Size::from_bits(8),
1785 I16 => Size::from_bits(16),
1786 I32 => Size::from_bits(32),
1787 I64 => Size::from_bits(64),
1788 I128 => Size::from_bits(128),
// True if `ty` has at least one value (i.e. its uninhabitedness set is empty).
1793 pub fn is_inhabited<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
1794 ty.uninhabited_from(&mut HashMap::default(), tcx).is_empty()
1797 /// FIXME: expose trans::monomorphize::resolve_closure
// Resolves a closure to a callable instance for the requested closure kind,
// inserting the FnOnce adapter shim when the closure is Fn/FnMut but is being
// called through FnOnce.
1798 pub fn resolve_closure<'a, 'tcx> (
1799 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1801 substs: ty::ClosureSubsts<'tcx>,
1802 requested_kind: ty::ClosureKind,
1803 ) -> ty::Instance<'tcx> {
1804 let actual_kind = tcx.closure_kind(def_id);
1805 match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
1806 Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
1807 _ => ty::Instance::new(def_id, substs.substs)
// Builds the `ClosureOnceShim` instance that adapts a Fn/FnMut closure so it
// can be called through the `FnOnce::call_once` entry point. The shim's substs
// are (closure self type, tupled argument type).
1811 fn fn_once_adapter_instance<'a, 'tcx>(
1812 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1814 substs: ty::ClosureSubsts<'tcx>,
1815 ) -> ty::Instance<'tcx> {
1816 debug!("fn_once_adapter_shim({:?}, {:?})",
// `call_once` is the (sole) method of the FnOnce lang item.
1819 let fn_once = tcx.lang_items.fn_once_trait().unwrap();
1820 let call_once = tcx.associated_items(fn_once)
1821 .find(|it| it.kind == ty::AssociatedKind::Method)
1823 let def = ty::InstanceDef::ClosureOnceShim { call_once };
1825 let self_ty = tcx.mk_closure_from_closure_substs(
1826 closure_did, substs);
1828 let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
1829 let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
// Closure calling convention: exactly one (tupled) argument.
1830 assert_eq!(sig.inputs().len(), 1);
1831 let substs = tcx.mk_substs([
1832 Kind::from(self_ty),
1833 Kind::from(sig.inputs()[0]),
1836 debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
1837 ty::Instance { def, substs }
// Decides whether calling a closure of `actual_closure_kind` through a trait of
// `trait_closure_kind` needs the call_once adapter shim. Exact kind matches and
// Fn-called-as-FnMut need none; Fn/FnMut called as FnOnce do.
// (Return type and remaining arms are elided in this view.)
1840 fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
1841 trait_closure_kind: ty::ClosureKind)
1844 match (actual_closure_kind, trait_closure_kind) {
1845 (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
1846 (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
1847 (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
1848 // No adapter needed.
1851 (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
1852 // The closure fn `llfn` is a `fn(&self, ...)`. We want a
1853 // `fn(&mut self, ...)`. In fact, at trans time, these are
1854 // basically the same thing, so we can just return llfn.
1857 (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
1858 (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
1859 // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
1860 // self, ...)`. We want a `fn(self, ...)`. We can produce
1861 // this by doing something like:
1863 // fn call_once(self, ...) { call_mut(&self, ...) }
1864 // fn call_once(mut self, ...) { call_mut(&mut self, ...) }
1866 // These are both the same at trans time.
1873 /// The point where linking happens. Resolve a (def_id, substs)
1874 /// pair to an instance.
// Trait items dispatch through trait selection; free items become intrinsics,
// drop-glue instances (for `drop_in_place`), or plain item instances.
1875 pub fn resolve<'a, 'tcx>(
1876 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1878 substs: &'tcx Substs<'tcx>
1879 ) -> ty::Instance<'tcx> {
1880 debug!("resolve(def_id={:?}, substs={:?})",
1882 let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
1883 debug!(" => associated item, attempting to find impl");
1884 let item = tcx.associated_item(def_id);
1885 resolve_associated_item(tcx, &item, trait_def_id, substs)
1887 let item_type = def_ty(tcx, def_id, substs);
1888 let def = match item_type.sty {
// Intrinsics are identified by their ABI, not by name.
1889 ty::TyFnDef(..) if {
1890 let f = item_type.fn_sig(tcx);
1891 f.abi() == Abi::RustIntrinsic ||
1892 f.abi() == Abi::PlatformIntrinsic
1895 debug!(" => intrinsic");
1896 ty::InstanceDef::Intrinsic(def_id)
// `drop_in_place::<T>`: attach the concrete dropped type when T needs glue.
1899 if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
1900 let ty = substs.type_at(0);
1901 if needs_drop_glue(tcx, ty) {
1902 debug!(" => nontrivial drop glue");
1903 ty::InstanceDef::DropGlue(def_id, Some(ty))
1905 debug!(" => trivial drop glue");
1906 ty::InstanceDef::DropGlue(def_id, None)
1909 debug!(" => free item");
1910 ty::InstanceDef::Item(def_id)
1914 ty::Instance { def, substs }
1916 debug!("resolve(def_id={:?}, substs={:?}) = {}",
1917 def_id, substs, result);
// Whether dropping a value of type `t` requires running any drop code.
// Refines `needs_drop` for Box: `Box<ZST>` never allocates, so it needs no
// deallocation glue either.
1921 pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
1922 assert!(t.is_normalized_for_trans());
1924 let t = tcx.erase_regions(&t);
1926 // FIXME (#22815): note that type_needs_drop conservatively
1927 // approximates in some cases and may say a type expression
1928 // requires drop glue when it actually does not.
1930 // (In this case it is not clear whether any harm is done, i.e.
1931 // erroneously returning `true` in some cases where we could have
1932 // returned `false` does not appear unsound. The impact on
1933 // code quality is unknown at this time.)
1935 let env = ty::ParamEnv::empty(Reveal::All);
1936 if !t.needs_drop(tcx, env) {
1940 ty::TyAdt(def, _) if def.is_box() => {
1941 let typ = t.boxed_ty();
1942 if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) {
1943 let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap();
1944 // `Box<ZeroSizeType>` does not allocate.
1945 layout.size(&tcx.data_layout).bytes() != 0
// Resolves a trait item to the concrete implementation that will run, by
// performing trait selection on the receiver's substs. Each vtable kind maps
// to the matching InstanceDef (impl method, closure, fn-pointer shim, or
// virtual call); anything else is an invalid static call.
1954 fn resolve_associated_item<'a, 'tcx>(
1955 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1956 trait_item: &ty::AssociatedItem,
1958 rcvr_substs: &'tcx Substs<'tcx>
1959 ) -> ty::Instance<'tcx> {
1960 let def_id = trait_item.def_id;
1961 debug!("resolve_associated_item(trait_item={:?}, \
1964 def_id, trait_id, rcvr_substs);
1966 let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
1967 let vtbl = fulfill_obligation(tcx, DUMMY_SP, ty::Binder(trait_ref));
1969 // Now that we know which impl is being used, we can dispatch to
1970 // the actual function:
1972 ::rustc::traits::VtableImpl(impl_data) => {
1973 let (def_id, substs) = ::rustc::traits::find_associated_item(
1974 tcx, trait_item, rcvr_substs, &impl_data);
1975 let substs = tcx.erase_regions(&substs);
1976 ty::Instance::new(def_id, substs)
1978 ::rustc::traits::VtableClosure(closure_data) => {
1979 let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
1980 resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
1983 ::rustc::traits::VtableFnPointer(ref data) => {
1985 def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
1989 ::rustc::traits::VtableObject(ref data) => {
// Dynamic dispatch: record the method's slot in the vtable.
1990 let index = tcx.get_vtable_index_of_object_method(data, def_id);
1992 def: ty::InstanceDef::Virtual(def_id, index),
1997 bug!("static call to invalid vtable: {:?}", vtbl)
// Type of the item `def_id`, with `substs` applied and associated types normalized.
2002 pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
2004 substs: &'tcx Substs<'tcx>)
2007 let ty = tcx.type_of(def_id);
2008 apply_param_substs(tcx, substs, &ty)
2011 /// Monomorphizes a type from the AST by first applying the in-scope
2012 /// substitutions and then normalizing any associated types.
2013 pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
2014 param_substs: &Substs<'tcx>,
2017 where T: ::rustc::infer::TransNormalize<'tcx>
2019 debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
// Substitute, erase regions (irrelevant post-typeck), then fold away projections.
2020 let substituted = value.subst(tcx, param_substs);
2021 let substituted = tcx.erase_regions(&substituted);
2022 AssociatedTypeNormalizer{ tcx }.fold(&substituted)
// Type folder that replaces associated-type projections with their normalized form.
2026 struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
2027 tcx: TyCtxt<'a, 'tcx, 'tcx>,
2030 impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
// Entry point: skip the fold entirely when `value` contains no projections.
2031 fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
2032 if !value.has_projection_types() {
2035 value.fold_with(self)
2040 impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
2041 fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
// Per-type hook: normalize only types that actually contain projections.
2045 fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
2046 if !ty.has_projection_types() {
2049 self.tcx.normalize_associated_type(&ty)
// Sized-ness check in the fully-revealed (post-monomorphization) environment.
2054 fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
2055 // generics are weird, don't run this function on a generic
2056 assert!(!ty.needs_subst());
2057 ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
2060 /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
2061 /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
2062 /// guarantee to us that all nested obligations *could be* resolved if we wanted to.
// Selection ambiguity is treated as recursion-limit overflow (fatal); selection
// errors are an ICE since typeck already proved the obligation holds.
2063 fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
2065 trait_ref: ty::PolyTraitRef<'tcx>)
2066 -> traits::Vtable<'tcx, ()>
2068 // Remove any references to regions; this helps improve caching.
2069 let trait_ref = tcx.erase_regions(&trait_ref);
2071 debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
2072 trait_ref, trait_ref.def_id());
2074 // Do the initial selection for the obligation. This yields the
2075 // shallow result we are looking for -- that is, what specific impl.
2076 tcx.infer_ctxt().enter(|infcx| {
2077 let mut selcx = traits::SelectionContext::new(&infcx);
2079 let obligation_cause = traits::ObligationCause::misc(span,
2080 ast::DUMMY_NODE_ID);
2081 let obligation = traits::Obligation::new(obligation_cause,
2082 ty::ParamEnv::empty(Reveal::All),
2083 trait_ref.to_poly_trait_predicate());
2085 let selection = match selcx.select(&obligation) {
2086 Ok(Some(selection)) => selection,
2088 // Ambiguity can happen when monomorphizing during trans
2089 // expands to some humongo type that never occurred
2090 // statically -- this humongo type can then overflow,
2091 // leading to an ambiguous result. So report this as an
2092 // overflow bug, since I believe this is the only case
2093 // where ambiguity can result.
2094 debug!("Encountered ambiguity selecting `{:?}` during trans, \
2095 presuming due to overflow",
2097 tcx.sess.span_fatal(span,
2098 "reached the recursion limit during monomorphization \
2099 (selection ambiguity)");
2102 span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
2107 debug!("fulfill_obligation: selection={:?}", selection);
2109 // Currently, we use a fulfillment context to completely resolve
2110 // all nested obligations. This is because they can inform the
2111 // inference of the impl's type parameters.
2112 let mut fulfill_cx = traits::FulfillmentContext::new();
2113 let vtable = selection.map(|predicate| {
2114 debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
2115 fulfill_cx.register_predicate_obligation(&infcx, predicate);
2117 let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
2119 debug!("Cache miss: {:?} => {:?}", trait_ref, vtable);
// Resolves the `drop_in_place::<ty>` instance via the DropInPlace lang item.
2124 pub fn resolve_drop_in_place<'a, 'tcx>(
2125 tcx: TyCtxt<'a, 'tcx, 'tcx>,
2127 ) -> ty::Instance<'tcx>
2129 let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
2130 let substs = tcx.intern_substs(&[Kind::from(ty)]);
2131 resolve(tcx, def_id, substs)