use std::cell::Cell;
use std::fmt::Write;
use std::mem;

use syntax::source_map::{self, Span, DUMMY_SP};
use rustc::hir::def_id::DefId;
use rustc::hir::def::DefKind;
use rustc::mir;
use rustc::ty::layout::{
    self, Size, Align, HasDataLayout, LayoutOf, TyLayout
};
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::query::TyCtxtAt;
use rustc_index::vec::IndexVec;
use rustc::mir::interpret::{
    GlobalId, Scalar, Pointer, FrameInfo, AllocId,
    InterpResult, truncate, sign_extend,
};
use rustc_data_structures::fx::FxHashMap;

use super::{
    Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef,
    Memory, Machine, PointerArithmetic, FnVal, StackPopInfo,
};

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    pub machine: M,

    /// The results of the type checker, from rustc.
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// The virtual call stack.
    pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>>,

    /// A cache for deduplicating vtables.
    pub(super) vtables:
        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}

pub struct Frame<'mir, 'tcx, Tag=(), Extra=()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// The span of the call site.
    pub span: source_map::Span,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain a `Scalar` or refer to some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
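
    // A sketch of the indexing scheme above (illustrative, not from the
    // original source): for `fn f(a: i32, b: i32) -> i32 { let t = a + b; t }`
    // the MIR locals are laid out as
    //
    //     locals[_0] // return place (kept dead here, see `push_stack_frame`)
    //     locals[_1] // argument `a`
    //     locals[_2] // argument `b`
    //     locals[_3] // user variable `t`, followed by any temporaries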

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently executed (or will be executed after the above call stacks
    /// return).
    pub block: mir::BasicBlock,

    /// The index of the currently evaluated statement.
    pub stmt: usize,

    /// Extra data for the machine.
    pub extra: Extra,
}

#[derive(Clone, Eq, PartialEq, Debug)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if None (that's a function
    /// that may never return). Also store layout of return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throw away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}
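
// A minimal sketch of how callers choose a cleanup mode (illustrative; the
// basic-block names here are assumptions, not part of this file):
//
//     // Ordinary call: return to `ret_bb`, run cleanup in `cleanup_bb` on unwind.
//     StackPopCleanup::Goto { ret: Some(ret_bb), unwind: Some(cleanup_bb) }
//     // Diverging function: ever returning from it is UB.
//     StackPopCleanup::Goto { ret: None, unwind: None }
//     // Root frame of a constant evaluation: leak the locals for interning.
//     StackPopCleanup::None { cleanup: false }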

/// State of a local variable including a memoized layout.
#[derive(Clone, PartialEq, Eq)]
pub struct LocalState<'tcx, Tag=(), Id=AllocId> {
    pub value: LocalValue<Tag, Id>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    pub layout: Cell<Option<TyLayout<'tcx>>>,
}

/// Current value of a local variable
#[derive(Clone, PartialEq, Eq, Debug)] // Miri debug-prints these
pub enum LocalValue<Tag=(), Id=AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to
    /// but not read from or its address taken. Locals get initialized on
    /// first write because for unsized locals, we do not know their size
    /// before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag, Id>),
}
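
// Lifecycle of a local, as a sketch (illustrative only):
//
//     Dead --StorageLive--> Uninitialized --first write--> Live(..)
//      ^                         |                            |
//      +-------StorageDead-------+----------StorageDead-------+
//
// Reading (`access`) is only legal on a `Live` local; writing (`access_mut`)
// is legal on both `Uninitialized` and `Live` locals.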

impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Uninitialized =>
                bug!("The type checker should prevent reading from a never-written local"),
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut local @ LocalValue::Live(Operand::Immediate(_)) |
            ref mut local @ LocalValue::Uninitialized => {
                Ok(Ok(local))
            }
        }
    }
}
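
// Usage sketch for `access_mut` (illustrative, not from this file): a write
// first tries in-place mutation and falls back to going through memory:
//
//     match frame.locals[local].access_mut()? {
//         Ok(local_val) => *local_val = LocalValue::Live(new_operand),
//         Err(mplace) => { /* write through `mplace` into memory instead */ }
//     }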

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &layout::TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyLayout = InterpResult<'tcx, TyLayout<'tcx>>;

    #[inline]
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.tcx
            .layout_of(self.param_env.and(ty))
            .map_err(|layout| err_inval!(Layout(layout)).into())
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx,
            param_env,
            memory: Memory::new(tcx, memory_extra),
            stack: Vec::new(),
            vtables: FxHashMap::default(),
        }
    }

    #[inline(always)]
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.force_ptr(scalar)
    }

    #[inline(always)]
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        self.memory.force_bits(scalar, size)
    }

    #[inline(always)]
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        self.memory.tag_static_base_pointer(ptr)
    }

    #[inline(always)]
    pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        &self.stack
    }

    #[inline(always)]
    pub fn cur_frame(&self) -> usize {
        assert!(self.stack.len() > 0);
        self.stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        sign_extend(value, ty.size)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        truncate(value, ty.size)
    }
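
    // Worked example (illustrative): for an `i8` layout (size 1 byte),
    // `sign_extend(0xff, ty)` widens the sign bit, yielding a `u128` of all
    // ones (the bit pattern of -1). Conversely, for a `u8` layout,
    // `truncate(0x1_00, ty)` discards the high bits and yields `0x00`.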

    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }

    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        // do not continue if typeck errors occurred (can only occur in local crate)
        let did = instance.def_id();
        if did.is_local()
            && self.tcx.has_typeck_tables(did)
            && self.tcx.typeck_tables_of(did).tainted_by_errors
        {
            throw_inval!(TypeckError)
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(&self.tcx.promoted_mir(did)[promoted]);
        }
        match instance {
            ty::InstanceDef::Item(def_id) => if self.tcx.is_mir_available(did) {
                Ok(self.tcx.optimized_mir(did))
            } else {
                throw_unsup!(NoMirFor(self.tcx.def_path_str(def_id)))
            },
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }
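
    // Illustrative call sites (a sketch; `ecx` and `p` are assumed names):
    //
    //     let body = ecx.load_mir(instance.def, None)?;       // the full body
    //     let prom = ecx.load_mir(instance.def, Some(p))?;    // promoted MIR `p`
    //
    // `InstanceDef::Item` goes through `optimized_mir`, while shims (drop
    // glue and friends) take the `instance_mir` path instead.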

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> T {
        self.tcx.subst_and_normalize_erasing_regions(
            self.frame().instance.substs,
            self.param_env,
            &value,
        )
    }

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def_id: DefId,
        substs: SubstsRef<'tcx>
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def_id, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        ty::Instance::resolve(
            *self.tcx,
            self.param_env,
            def_id,
            substs,
        ).ok_or_else(|| err_inval!(TooGeneric).into())
    }

    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = crate::interpret::operand::from_known_layout(layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty = self.tcx.subst_and_normalize_erasing_regions(
                        frame.instance.substs,
                        self.param_env,
                        &local_ty,
                    );
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }
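
    // Typical use (illustrative): operand evaluation asks for a local's layout
    // and passes `None` when it has no layout hint, letting the cache do its job:
    //
    //     let layout = self.layout_of_local(self.frame(), local, None)?;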

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: Option<Scalar<M::PointerTag>>,
        layout: TyLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use `type_of::sizing_type_of` because that expects `t` to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty, sized_size, sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here, how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1)?;
                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
                    Some(size_and_align) => size_and_align,
                    None => {
                        // A field with extern type. If this field is at offset 0, we behave
                        // like the underlying extern type.
                        // FIXME: Once we have made decisions for how to handle size and alignment
                        // of `extern type`, this should be adapted. It is just a temporary hack
                        // to get some code to work that probably ought to work.
                        if sized_size == Size::ZERO {
                            return Ok(None)
                        } else {
                            bug!("Fields cannot be extern types, unless they are at offset 0")
                        }
                    }
                };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `unsized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size;

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
                    throw_ub_format!("wide pointer metadata contains invalid information: \
                        total size is bigger than largest supported object");
                }
                Ok(Some((size, align)))
            }
            ty::Dynamic(..) => {
                let vtable = metadata.expect("dyn trait fat ptr must have vtable");
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.expect("slice fat ptr must have length").to_machine_usize(self)?;
                let elem = layout.field(self, 0)?;

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, &*self.tcx)
                    .ok_or_else(|| err_ub_format!("invalid slice: \
                        total size is bigger than largest supported object"))?;
                Ok(Some((size, elem.align.abi)))
            }

            ty::Foreign(_) => Ok(None),

            _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }
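
    // Worked example of the `Adt`/`Tuple` arm above (illustrative): for
    // `struct S { x: u16, tail: [u8] }` with slice length 5 in the metadata,
    // `sized_size` is 2 (the offset of `tail`), the tail contributes size 5
    // with align 1, so `size = (2 + 5).align_to(max(2, 1))` = 8 bytes, align 2.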

    #[inline]
    pub fn size_and_align_of_mplace(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(mplace.meta, mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        span: Span,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        if self.stack.len() > 0 {
            info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
        }
        ::log_settings::settings().indentation += 1;

        // first push a stack frame so we have access to the local substs
        let extra = M::stack_push(self)?;
        self.stack.push(Frame {
            body,
            block: mir::START_BLOCK,
            return_to_block,
            return_place,
            // empty local array, we fill it in below, after we are inside the stack frame and
            // all methods actually know about the frame
            locals: IndexVec::new(),
            span,
            instance,
            stmt: 0,
            extra,
        });

        // don't allocate at all for trivial constants
        if body.local_decls.len() > 1 {
            // Locals are initially uninitialized.
            let dummy = LocalState {
                value: LocalValue::Uninitialized,
                layout: Cell::new(None),
            };
            let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
            // Return place is handled specially by the `eval_place` functions, and the
            // entry in `locals` should never be used. Make it dead, to be sure.
            locals[mir::RETURN_PLACE].value = LocalValue::Dead;
            // Now mark those locals as dead that we do not want to initialize
            match self.tcx.def_kind(instance.def_id()) {
                // statics and constants don't have `Storage*` statements, no need to look for them
                Some(DefKind::Static)
                | Some(DefKind::Const)
                | Some(DefKind::AssocConst) => {},
                _ => {
                    trace!("push_stack_frame: {:?}: num_bbs: {}", span, body.basic_blocks().len());
                    for block in body.basic_blocks() {
                        for stmt in block.statements.iter() {
                            use rustc::mir::StatementKind::{StorageDead, StorageLive};
                            match stmt.kind {
                                StorageLive(local) |
                                StorageDead(local) => {
                                    locals[local].value = LocalValue::Dead;
                                }
                                _ => {}
                            }
                        }
                    }
                },
            }

            self.frame_mut().locals = locals;
        }

        info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance);

        if self.stack.len() > self.tcx.sess.const_eval_stack_frame_limit {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(())
        }
    }
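
    // Frame lifecycle sketch (illustrative; `dest` and `ret_bb` are assumed
    // names): a call pushes a frame, the interpreter steps through it, and the
    // matching pop validates the return value and jumps back to the caller:
    //
    //     self.push_stack_frame(instance, span, body, Some(dest),
    //         StackPopCleanup::Goto { ret: Some(ret_bb), unwind: None })?;
    //     // ... step through statements and terminators ...
    //     self.pop_stack_frame(/* unwinding */ false)?;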

    pub(super) fn pop_stack_frame_internal(
        &mut self,
        unwinding: bool,
    ) -> InterpResult<'tcx, (StackPopCleanup, StackPopInfo)> {
        info!("LEAVING({}) {} (unwinding = {})",
            self.cur_frame(), self.frame().instance, unwinding);

        ::log_settings::settings().indentation -= 1;
        let frame = self.stack.pop().expect(
            "tried to pop a stack frame, but there were none",
        );
        let stack_pop_info = M::stack_pop(self, frame.extra)?;

        // Abort early if we do not want to clean up: We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        match frame.return_to_block {
            StackPopCleanup::Goto { .. } => {},
            StackPopCleanup::None { cleanup, .. } => {
                assert!(!unwinding, "Encountered StackPopCleanup::None while unwinding");

                if !cleanup {
                    assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
                    // Leak the locals, skip validation.
                    return Ok((frame.return_to_block, stack_pop_info));
                }
            }
        }

        // Deallocate all locals that are backed by an allocation.
        for local in frame.locals {
            self.deallocate_local(local.value)?;
        }

        // If we're popping frames due to unwinding, and we didn't just exit
        // unwinding, we skip a bunch of validation and cleanup logic (including
        // jumping to the regular return block specified in the `StackPopCleanup`).
        let cur_unwinding = unwinding && stack_pop_info != StackPopInfo::StopUnwinding;

        info!("StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
            frame.return_to_block, stack_pop_info, cur_unwinding);

        // When we're popping a stack frame for unwinding purposes,
        // we don't care at all about returning-related stuff (i.e. return_place
        // and return_to_block), because we're not performing a return from this frame.
        if !cur_unwinding {
            // Validate the return value. Do this after deallocating so that we catch dangling
            // references.
            if let Some(return_place) = frame.return_place {
                if M::enforce_validity(self) {
                    // Data got changed, better make sure it matches the type!
                    // It is still possible that the return place held invalid data while
                    // the function is running, but that's okay because nobody could have
                    // accessed that same data from the "outside" to observe any broken
                    // invariant -- that is, unless a function somehow has a ptr to
                    // its return place... but the way MIR is currently generated, the
                    // return place is always a local and then this cannot happen.
                    self.validate_operand(
                        self.place_to_op(return_place)?,
                        vec![],
                        None,
                    )?;
                }
            } else {
                // Uh, that shouldn't happen... the function did not intend to return
                throw_ub!(Unreachable);
            }

            // Jump to new block -- *after* validation so that the spans make more sense.
            match frame.return_to_block {
                StackPopCleanup::Goto { ret, .. } => {
                    self.goto_block(ret)?;
                }
                StackPopCleanup::None { .. } => {}
            }
        }

        Ok((frame.return_to_block, stack_pop_info))
    }

    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
        let (mut cleanup, mut stack_pop_info) = self.pop_stack_frame_internal(unwinding)?;

        // There are two cases where we want to unwind the stack:
        // * The caller explicitly told us (i.e. we hit a Resume terminator)
        // * The machine indicated that we've just started unwinding (i.e.
        //   a panic has just occurred)
        if unwinding || stack_pop_info == StackPopInfo::StartUnwinding {
            trace!("unwinding: starting stack unwind...");
            // Overwrite our current stack_pop_info, so that the check
            // below doesn't fail.
            stack_pop_info = StackPopInfo::Normal;
            // There are three possible ways that we can exit the loop:
            // 1) We find an unwind block - we jump to it to allow cleanup
            //    to occur for that frame
            // 2) pop_stack_frame_internal reports that we're no longer unwinding
            //    - this means that the panic has been caught, and that execution
            //    should continue as normal
            // 3) We pop all of our frames off the stack - this should never happen.
            while !self.stack.is_empty() {
                match stack_pop_info {
                    // We tried to start unwinding while we were already
                    // unwinding. Note that this **is not** the same thing
                    // as a double panic, which will be intercepted by
                    // libcore/libstd before we actually attempt to unwind.
                    StackPopInfo::StartUnwinding => {
                        throw_ub_format!("Attempted to start unwinding while already unwinding!");
                    },
                    StackPopInfo::StopUnwinding => {
                        trace!("unwinding: no longer unwinding!");
                        break;
                    }
                    StackPopInfo::Normal => {}
                }

                match cleanup {
                    StackPopCleanup::Goto { unwind, .. } if unwind.is_some() => {
                        info!("unwind: found cleanup block {:?}", unwind);
                        self.goto_block(unwind)?;
                        break;
                    },
                    _ => {}
                }

                info!("unwinding: popping frame!");
                let res = self.pop_stack_frame_internal(true)?;
                cleanup = res.0;
                stack_pop_info = res.1;
            }
            if self.stack.is_empty() {
                // We should never get here:
                // The 'start_fn' lang item should always install a panic handler
                throw_ub!(Unreachable);
            }
        }

        if self.stack.len() > 0 {
            info!("CONTINUING({}) {}", self.cur_frame(), self.frame().instance);
        }

        Ok(())
    }

    /// Mark a local as live, killing the previous content and returning it.
    /// Remember to deallocate that!
    pub fn storage_live(
        &mut self,
        local: mir::Local,
    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable already is live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
    }

    /// Mark a local as dead, returning the old value of the local.
    /// Remember to deallocate that!
    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
    }
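
    // Usage sketch (illustrative): `StorageLive`/`StorageDead` MIR statements
    // map onto these calls, and the returned old value must be deallocated:
    //
    //     let old = self.storage_live(local)?;   // or: self.storage_dead(local)
    //     self.deallocate_local(old)?;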

    pub(super) fn deallocate_local(
        &mut self,
        local: LocalValue<M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // FIXME: should we tell the user that there was a local which was never written to?
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            trace!("deallocating local");
            let ptr = ptr.to_ptr()?;
            self.memory.dump_alloc(ptr.alloc_id);
            self.memory.deallocate_local(ptr)?;
        };
        Ok(())
    }

    pub fn const_eval_raw(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // FIXME(oli-obk): make this check an assertion that it's not a static here
        // FIXME(RalfJ, oli-obk): document that `Place::Static` can never be anything but a static
        // and `ConstValue::Unevaluated` can never be a static
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        // We use `const_eval_raw` here, and get an unvalidated result. That is okay:
        // Our result will later be validated anyway, and there seems no good reason
        // to have to fail early here. This is also more consistent with
        // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    pub fn dump_place(&self, place: Place<M::PointerTag>) {
        // Debug output
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        match place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                let mut msg = format!("{:?}", local);
                if frame != self.cur_frame() {
                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
                }
                write!(msg, ":").unwrap();

                match self.stack[frame].locals[local].value {
                    LocalValue::Dead => write!(msg, " is dead").unwrap(),
                    LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
                    LocalValue::Live(Operand::Indirect(mplace)) => {
                        match mplace.ptr {
                            Scalar::Ptr(ptr) => {
                                write!(msg, " by align({}){} ref:",
                                    mplace.align.bytes(),
                                    match mplace.meta {
                                        Some(meta) => format!(" meta({:?})", meta),
                                        None => String::new()
                                    }).unwrap();
                                allocs.push(ptr.alloc_id);
                            }
                            ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(msg, " {:?}", val).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                trace!("{}", msg);
                self.memory.dump_allocs(allocs);
            }
            Place::Ptr(mplace) => {
                match mplace.ptr {
                    Scalar::Ptr(ptr) => {
                        trace!("by align({}) ref:", mplace.align.bytes());
                        self.memory.dump_alloc(ptr.alloc_id);
                    }
                    ptr => trace!(" integral by ref: {:?}", ptr),
                }
            }
        }
    }

    pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> Vec<FrameInfo<'tcx>> {
        let mut last_span = None;
        let mut frames = Vec::new();
        for &Frame { instance, span, body, block, stmt, .. } in self.stack().iter().rev() {
            // make sure we don't emit frames that are duplicates of the previous
            if explicit_span == Some(span) {
                last_span = Some(span);
                continue;
            }
            if let Some(last) = last_span {
                if last == span {
                    continue;
                }
            }
            last_span = Some(span);

            let block = &body.basic_blocks()[block];
            let source_info = if stmt < block.statements.len() {
                block.statements[stmt].source_info
            } else {
                block.terminator().source_info
            };
            let lint_root = match body.source_scope_local_data {
                mir::ClearCrossCrate::Set(ref ivs) => Some(ivs[source_info.scope].lint_root),
                mir::ClearCrossCrate::Clear => None,
            };
            frames.push(FrameInfo { call_site: span, instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span);
        frames
    }

    /// Resolve the function at the specified slot in the provided
    /// vtable. An index of `0` corresponds to the first method
    /// declared in the trait of the provided vtable.
    pub fn get_vtable_slot(
        &self,
        vtable: Scalar<M::PointerTag>,
        idx: usize,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr_size = self.pointer_size();
        // Skip over the 'drop_ptr', 'size', and 'align' fields.
        let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
        let vtable_slot = self.memory.check_ptr_access(
            vtable_slot,
            ptr_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
            .read_ptr_sized(self, vtable_slot)?.not_undef()?;
        Ok(self.memory.get_fn(fn_ptr)?)
    }
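
    // Vtable layout assumed by the `idx + 3` offset above (a sketch):
    //
    //     word 0: drop-in-place function pointer
    //     word 1: size of the concrete type
    //     word 2: alignment of the concrete type
    //     word 3: first trait method   (idx == 0 => offset = ptr_size * 3)
    //     word 4: second trait method  (idx == 1 => offset = ptr_size * 4)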