use std::cell::Cell;
use std::fmt;
use std::mem;

use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
use rustc_middle::ty::layout::{self, LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{
    self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
};
use rustc_mir_dataflow::storage::AlwaysLiveLocals;
use rustc_query_system::ich::StableHashingContext;
use rustc_session::Limit;
use rustc_span::{Pos, Span};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

use super::{
    AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
    MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance, Scalar,
    ScalarMaybeUninit, StackPopJump,
};
use crate::transform::validate::equal_up_to_regions;

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    ///
    /// Note: the stack is provided by the machine.
    pub machine: M,

    /// The results of the type checker, from rustc.
    /// The span in this is the "root" of the evaluation, i.e., the const
    /// we are evaluating (if this is CTFE).
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// The recursion limit (cached from `tcx.recursion_limit(())`).
    pub recursion_limit: Limit,
}

// The `PhantomData` exists to prevent this type from being `Send`. If it were sent across
// a thread boundary and dropped in the other thread, it would exit the span in the other
// thread.
struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);

impl SpanGuard {
    /// By default a `SpanGuard` does nothing.
    fn new() -> Self {
        Self(tracing::Span::none(), std::marker::PhantomData)
    }

    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
    /// been pushed.
    fn enter(&mut self, span: tracing::Span) {
        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
        // we never enter a new span without exiting the previous one. If you `mem::forget` the
        // guard, we cannot protect the tracing stack, but that only leads to weird logging, no
        // actual problems.
        *self = Self(span, std::marker::PhantomData);
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.enter(id);
        });
    }
}

impl Drop for SpanGuard {
    fn drop(&mut self) {
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.exit(id);
        });
    }
}

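// Illustrative usage sketch: `push_stack_frame` below uses the guard exactly this way --
// create an inert guard first, then swap in the real span once the frame exists:
//
//     let mut tracing_span = SpanGuard::new();
//     tracing_span.enter(info_span!("frame", "{}", instance));
//
// Dropping the guard (when the frame is popped) exits the span again.
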
/// A stack frame.
pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain a `Scalar` or refer to some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,

    /// The `tracing` span for this frame.
    /// When the guard is dropped, the span is exited. This gives us
    /// a full stack trace on all tracing statements.
    tracing_span: SpanGuard,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// If this is `Err`, we are not currently executing any particular statement in
    /// this frame (can happen e.g. during frame initialization, and during unwinding on
    /// frames without cleanup code).
    /// We basically abuse `Result` as `Either`.
    pub(super) loc: Result<mir::Location, Span>,
}

/// What we store about a frame in an interpreter backtrace.
#[derive(Debug)]
pub struct FrameInfo<'tcx> {
    pub instance: ty::Instance<'tcx>,
    pub span: Span,
    pub lint_root: Option<hir::HirId>,
}

/// Unwind information.
#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)]
pub enum StackPopUnwind {
    /// The cleanup block.
    Cleanup(mir::BasicBlock),
    /// No cleanup needs to be done.
    Skip,
    /// Unwinding is not allowed (UB).
    NotAllowed,
}

#[derive(Clone, Copy, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if `None` (that's a function
    /// that may never return). Also store the layout of the return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: StackPopUnwind },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throws away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}

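// For illustration: an ordinary call pushes its frame with something like
// `StackPopCleanup::Goto { ret: Some(next_block), unwind: StackPopUnwind::Cleanup(cleanup_block) }`,
// while a CTFE root frame uses `StackPopCleanup::None { cleanup: false }` so that the
// final value's allocations survive for interning (see `pop_stack_frame` below).
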
/// State of a local variable including a memoized layout.
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
    pub value: LocalValue<Tag>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    #[stable_hasher(ignore)]
    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
}

/// Current value of a local variable.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Debug)] // Miri debug-prints these
pub enum LocalValue<Tag: Provenance = AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to,
    /// but not read from, and its address cannot be taken. Locals get
    /// initialized on first write because for unsized locals, we do not know
    /// their size before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag>),
}

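// Lifecycle, as enforced by `storage_live`/`storage_dead` below and the write machinery:
// `Dead` -> `Uninitialized` on `StorageLive`, `Uninitialized` -> `Live(..)` on the first
// write, and back to `Dead` on `StorageDead`.
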
impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
    /// Read the local's value, or error if the local is not yet live or not live anymore.
    ///
    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
    /// anywhere else. You may be invalidating machine invariants if you do!
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Uninitialized => {
                bug!("The type checker should prevent reading from a never-written local")
            }
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    ///
    /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
    /// anywhere else. You may be invalidating machine invariants if you do!
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut
            local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
                Ok(Ok(local))
            }
        }
    }
}

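// Hedged sketch of the intended call path: the default `Machine::access_local` hook simply
// forwards to `access` above, roughly
//
//     fn access_local(_ecx: &InterpCx<..>, frame: &Frame<..>, local: mir::Local)
//         -> InterpResult<'tcx, Operand<..>> {
//         frame.locals[local].access()
//     }
//
// Machines that override the hook must uphold the same invariants.
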
impl<'mir, 'tcx, Tag: Provenance> Frame<'mir, 'tcx, Tag> {
    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
        Frame {
            body: self.body,
            instance: self.instance,
            return_to_block: self.return_to_block,
            return_place: self.return_place,
            locals: self.locals,
            loc: self.loc,
            extra,
            tracing_span: self.tracing_span,
        }
    }
}

impl<'mir, 'tcx, Tag: Provenance, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Get the current location within the Frame.
    ///
    /// If this is `Err`, we are not currently executing any particular statement in
    /// this frame (can happen e.g. during frame initialization, and during unwinding on
    /// frames without cleanup code).
    /// We basically abuse `Result` as `Either`.
    ///
    /// Used by priroda.
    pub fn current_loc(&self) -> Result<mir::Location, Span> {
        self.loc
    }

    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
        self.loc.ok().map(|loc| self.body.source_info(loc))
    }

    pub fn current_span(&self) -> Span {
        match self.loc {
            Ok(loc) => self.body.source_info(loc).span,
            Err(span) => span,
        }
    }
}

impl<'tcx> fmt::Display for FrameInfo<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        ty::tls::with(|tcx| {
            if tcx.def_key(self.instance.def_id()).disambiguated_data.data
                == DefPathData::ClosureExpr
            {
                write!(f, "inside closure")?;
            } else {
                write!(f, "inside `{}`", self.instance)?;
            }
            if !self.span.is_dummy() {
                let sm = tcx.sess.source_map();
                let lo = sm.lookup_char_pos(self.span.lo());
                write!(
                    f,
                    " at {}:{}:{}",
                    sm.filename_for_diagnostics(&lo.file.name),
                    lo.line,
                    lo.col.to_usize() + 1
                )?;
            }
            Ok(())
        })
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
    type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;

    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        self.tcx.span
    }

    #[inline]
    fn handle_layout_err(
        &self,
        err: LayoutError<'tcx>,
        _: Span,
        _: Ty<'tcx>,
    ) -> InterpErrorInfo<'tcx> {
        err_inval!(Layout(err)).into()
    }
}

/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    src: TyAndLayout<'tcx>,
    dest: TyAndLayout<'tcx>,
) -> bool {
    // Type-changing assignments can happen when subtyping is used. While
    // all normal lifetimes are erased, higher-ranked types with their
    // late-bound lifetimes are still around and can lead to type
    // differences. So we compare ignoring lifetimes.
    if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
        // Make sure the layout is equal, too -- just to be safe. Miri really
        // needs layout equality. For performance reasons we skip this check when
        // the types are equal. Equal types *can* have different layouts when
        // enum downcast is involved (as enum variants carry the type of the
        // enum), but those should never occur in assignments.
        if cfg!(debug_assertions) || src.ty != dest.ty {
            assert_eq!(src.layout, dest.layout);
        }
        true
    } else {
        false
    }
}

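// Example of the subtyping case mentioned above: copying a value of type `&'static str`
// into a place of type `&'a str` is a valid MIR assignment; the types differ only in
// lifetimes, so `equal_up_to_regions` accepts them (and their layouts must agree).
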
/// Use the already known layout if given (but sanity check in debug mode),
/// or compute the layout.
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
    tcx: TyCtxtAt<'tcx>,
    param_env: ParamEnv<'tcx>,
    known_layout: Option<TyAndLayout<'tcx>>,
    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
    match known_layout {
        None => compute(),
        Some(known_layout) => {
            if cfg!(debug_assertions) {
                let check_layout = compute()?;
                if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
                    span_bug!(
                        tcx.span,
                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
                        known_layout.ty,
                        check_layout.ty,
                    );
                }
            }
            Ok(known_layout)
        }
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxt<'tcx>,
        root_span: Span,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx: tcx.at(root_span),
            param_env,
            memory: Memory::new(tcx, memory_extra),
            recursion_limit: tcx.recursion_limit(),
        }
    }

    #[inline(always)]
    pub fn cur_span(&self) -> Span {
        self.stack()
            .iter()
            .rev()
            .find(|frame| !frame.instance.def.requires_caller_location(*self.tcx))
            .map_or(self.tcx.span, |f| f.current_span())
    }

    #[inline(always)]
    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
        self.memory.scalar_to_ptr(scalar)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline(always)]
    pub fn global_base_pointer(&self, ptr: Pointer) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.global_base_pointer(ptr)
    }

    #[inline(always)]
    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        M::stack(self)
    }

    #[inline(always)]
    pub(crate) fn stack_mut(
        &mut self,
    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
        M::stack_mut(self)
    }

    #[inline(always)]
    pub fn frame_idx(&self) -> usize {
        let stack = self.stack();
        assert!(!stack.is_empty());
        stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack().last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack_mut().last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        ty.size.sign_extend(value)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        ty.size.truncate(value)
    }

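    // Worked example for the two helpers above, for an `i8` layout (`size` = 1 byte):
    // `sign_extend(0xFF, i8)` yields all 128 bits set (i.e. -1 as `i128`), while
    // `truncate(0x1FF, i8)` yields `0xFF` -- only the low `size * 8` bits are kept.
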
    #[inline]
    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(self.tcx, self.param_env)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        // Do not continue if typeck errors occurred (can only occur in local crate).
        let def = instance.with_opt_param();
        if let Some(def) = def.as_local() {
            if self.tcx.has_typeck_results(def.did) {
                if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors {
                    throw_inval!(AlreadyReported(error_reported))
                }
            }
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(&self.tcx.promoted_mir_opt_const_arg(def)[promoted]);
        }
        M::load_mir(self, instance)
    }

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> Result<T, InterpError<'tcx>> {
        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
    }

    /// Call this on things you got out of the MIR (so it is as generic as the provided
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        value: T,
    ) -> Result<T, InterpError<'tcx>> {
        frame
            .instance
            .try_subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value)
            .or_else(|e| {
                self.tcx.sess.delay_span_bug(
                    self.cur_span(),
                    format!("failed to normalize {}", e.get_type_for_failure()).as_str(),
                );

                Err(InterpError::InvalidProgram(InvalidProgramInfo::TooGeneric))
            })
    }

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def: ty::WithOptConstParam<DefId>,
        substs: SubstsRef<'tcx>,
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        match ty::Instance::resolve_opt_const_arg(*self.tcx, self.param_env, def, substs) {
            Ok(Some(instance)) => Ok(instance),
            Ok(None) => throw_inval!(TooGeneric),

            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
            Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
        }
    }

    #[inline(always)]
    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = from_known_layout(self.tcx, self.param_env, layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty =
                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: &MemPlaceMeta<M::PointerTag>,
        layout: &TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind() {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use `type_of::sizing_type_of` because that expects `t` to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                assert!(layout.fields.count() > 0);
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty,
                    sized_size,
                    sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here, how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1);
                let (unsized_size, unsized_align) =
                    match self.size_and_align_of(metadata, &field)? {
                        Some(size_and_align) => size_and_align,
                        None => {
                            // A field with an extern type. We don't know the actual dynamic size
                            // or the alignment.
                            return Ok(None);
                        }
                    };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `sized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size; // `Size` addition

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                }
                Ok(Some((size, align)))
            }

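            // Worked example for the arm above: for `struct S { a: u16, b: [u8] }` with a
            // slice length of 5, `sized_size` = 2 and `sized_align` = 2; the unsized tail
            // reports size 5 and align 1, so `size` = align_to(2 + 5, max(2, 1)) = 8 bytes.
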
            ty::Dynamic(..) => {
                let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.unwrap_meta().to_machine_usize(self)?;
                let elem = layout.field(self, 0);

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
                })?;
                Ok(Some((size, elem.align.abi)))
            }

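            // Worked example: a `[u16]` with `len == 3` has `elem.size` = 2 and
            // `elem.align` = 2, so the place is 6 bytes large and 2-byte aligned.
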
            ty::Foreign(_) => Ok(None),

            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }

    pub fn size_and_align_of_mplace(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(&mplace.meta, &mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<&PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        // First push a stack frame so we have access to the local substs.
        let pre_frame = Frame {
            body,
            loc: Err(body.span), // Span used for errors caused during preamble.
            return_to_block,
            return_place: return_place.copied(),
            // Empty local array; we fill it in below, after we are inside the stack frame and
            // all methods actually know about the frame.
            locals: IndexVec::new(),
            instance,
            tracing_span: SpanGuard::new(),
            extra: (),
        };
        let frame = M::init_frame_extra(self, pre_frame)?;
        self.stack_mut().push(frame);

        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
        for const_ in &body.required_consts {
            let span = const_.span;
            let const_ =
                self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal)?;
            self.mir_const_to_op(&const_, None).map_err(|err| {
                // If there was an error, set the span of the current frame to this constant.
                // We avoid doing this when evaluation succeeds.
                self.frame_mut().loc = Err(span);
                err
            })?;
        }

        // Locals are initially uninitialized.
        let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);

        // Now mark those locals as dead that we do not want to initialize:
        // locals covered by `Storage*` annotations are dead on function entry.
        let always_live = AlwaysLiveLocals::new(self.body());
        for local in locals.indices() {
            if !always_live.contains(local) {
                locals[local].value = LocalValue::Dead;
            }
        }
        // done
        self.frame_mut().locals = locals;
        M::after_stack_push(self)?;
        self.frame_mut().loc = Ok(mir::Location::START);

        let span = info_span!("frame", "{}", instance);
        self.frame_mut().tracing_span.enter(span);

        Ok(())
    }

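    // Illustrative call-site (hedged sketch mirroring how the CTFE driver pushes its root
    // frame; `cid`, `body`, and `ret` are that caller's values):
    //
    //     ecx.push_stack_frame(
    //         cid.instance,
    //         body,
    //         Some(&ret.into()),
    //         StackPopCleanup::None { cleanup: false },
    //     )?;
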
    /// Jump to the given block.
    #[inline]
    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
    }

    /// *Return* to the given `target` basic block.
    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
        if let Some(target) = target {
            self.go_to_block(target);
            Ok(())
        } else {
            throw_ub!(Unreachable)
        }
    }

    /// *Unwind* to the given `target` basic block.
    /// Do *not* use for returning! Use `return_to_block` instead.
    ///
    /// If `target` is `StackPopUnwind::Skip`, that indicates the function does not need cleanup
    /// during unwinding, and we will just keep propagating that upwards.
    ///
    /// If `target` is `StackPopUnwind::NotAllowed`, that indicates the function does not allow
    /// unwinding, and doing so is UB.
    pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
        self.frame_mut().loc = match target {
            StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
            StackPopUnwind::Skip => Err(self.frame_mut().body.span),
            StackPopUnwind::NotAllowed => {
                throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
            }
        };
        Ok(())
    }

    /// Pops the current frame from the stack, deallocating the
    /// memory for allocated locals.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
        info!(
            "popping stack frame ({})",
            if unwinding { "during unwinding" } else { "returning from function" }
        );

        // Sanity check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().loc {
                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
                Err(_) => true,
            }
        );

        if unwinding && self.frame_idx() == 0 {
            throw_ub_format!("unwinding past the topmost frame of the stack");
        }

        let frame =
            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");

        if !unwinding {
            // Copy the return value to the caller's stack frame.
            if let Some(ref return_place) = frame.return_place {
                let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
                self.copy_op_transmute(&op, return_place)?;
                trace!("{:?}", self.dump_place(**return_place));
            } else {
                throw_ub!(Unreachable);
            }
        }

        let return_to_block = frame.return_to_block;

        // Now where do we jump next?

        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // In that case, we return early. We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        let cleanup = match return_to_block {
            StackPopCleanup::Goto { .. } => true,
            StackPopCleanup::None { cleanup, .. } => cleanup,
        };

        if !cleanup {
            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
            assert!(!unwinding, "tried to skip cleanup during unwinding");
            // Leak the locals, skip validation, skip machine hook.
            return Ok(());
        }

        // Cleanup: deallocate all locals that are backed by an allocation.
        for local in &frame.locals {
            self.deallocate_local(local.value)?;
        }

        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
            // The hook already did everything, so there is nothing left for us to do.
            return Ok(());
        }

        // Normal return, figure out where to jump.
        if unwinding {
            // Follow the unwind edge.
            let unwind = match return_to_block {
                StackPopCleanup::Goto { unwind, .. } => unwind,
                StackPopCleanup::None { .. } => {
                    panic!("Encountered StackPopCleanup::None when unwinding!")
                }
            };
            self.unwind_to_block(unwind)
        } else {
            // Follow the normal return edge.
            match return_to_block {
                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
                StackPopCleanup::None { .. } => Ok(()),
            }
        }
    }

    /// Mark a storage as live, killing the previous content.
    pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive expects the local to be dead, and marks it live.
        let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
        if !matches!(old, LocalValue::Dead) {
            throw_ub_format!("StorageLive on a local that was already live");
        }
        Ok(())
    }

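    // For illustration, the MIR sequence that trips the check above:
    //
    //     StorageLive(_1);
    //     StorageLive(_1); // UB: `_1` was already live
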
    pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        // It is entirely okay for this local to be already dead (at least that's how we
        // currently generate MIR).
        let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
        self.deallocate_local(old)?;
        Ok(())
    }

    fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            // All locals have a backing allocation, even if the allocation is empty
            // due to the local having ZST type. Hence we can `unwrap`.
            trace!(
                "deallocating local {:?}: {:?}",
                local,
                self.memory.dump_alloc(ptr.provenance.unwrap().get_alloc_id())
            );
            self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
        };
        Ok(())
    }

    pub fn eval_to_allocation(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        let param_env = param_env.with_const();
        let val = self.tcx.eval_to_allocation_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    #[must_use]
    pub fn dump_place(&self, place: Place<M::PointerTag>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
        PlacePrinter { ecx: self, place }
    }

    #[must_use]
    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
        let mut frames = Vec::new();
        for frame in self
            .stack()
            .iter()
            .rev()
            .skip_while(|frame| frame.instance.def.requires_caller_location(*self.tcx))
        {
            let lint_root = frame.current_source_info().and_then(|source_info| {
                match &frame.body.source_scopes[source_info.scope].local_data {
                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                    mir::ClearCrossCrate::Clear => None,
                }
            });
            let span = frame.current_span();

            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}", frames);
        frames
    }
}

#[doc(hidden)]
/// Helper struct for the `dump_place` function.
pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    place: Place<M::PointerTag>,
}

impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
    for PlacePrinter<'a, 'mir, 'tcx, M>
{
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                write!(fmt, "{:?}", local)?;
                if frame != self.ecx.frame_idx() {
                    write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
                }
                write!(fmt, ":")?;

                match self.ecx.stack()[frame].locals[local].value {
                    LocalValue::Dead => write!(fmt, " is dead")?,
                    LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
                    LocalValue::Live(Operand::Indirect(mplace)) => {
                        write!(
                            fmt,
                            " by align({}){} ref {:?}:",
                            mplace.align.bytes(),
                            match mplace.meta {
                                MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
                                MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
                            },
                            mplace.ptr,
                        )?;
                        allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(fmt, " {:?}", val)?;
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
                            allocs.push(ptr.provenance.get_alloc_id());
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
                            allocs.push(ptr.provenance.get_alloc_id());
                        }
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
                            allocs.push(ptr.provenance.get_alloc_id());
                        }
                    }
                }

                write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
            }
            Place::Ptr(mplace) => match mplace.ptr.provenance.map(Provenance::get_alloc_id) {
                Some(alloc_id) => write!(
                    fmt,
                    "by align({}) ref {:?}: {:?}",
                    mplace.align.bytes(),
                    mplace.ptr,
                    self.ecx.memory.dump_alloc(alloc_id)
                ),
                ptr => write!(fmt, " integral by ref: {:?}", ptr),
            },
        }
    }
}

impl<'ctx, 'mir, 'tcx, Tag: Provenance, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
where
    Extra: HashStable<StableHashingContext<'ctx>>,
    Tag: HashStable<StableHashingContext<'ctx>>,
{
    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
        // Exhaustive match on fields to make sure we forget no field.
        let Frame {
            body,
            instance,
            return_to_block,
            return_place,
            locals,
            loc,
            extra,
            tracing_span: _,
        } = self;
        body.hash_stable(hcx, hasher);
        instance.hash_stable(hcx, hasher);
        return_to_block.hash_stable(hcx, hasher);
        return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
        locals.hash_stable(hcx, hasher);
        loc.hash_stable(hcx, hasher);
        extra.hash_stable(hcx, hasher);
    }
}