use std::cell::Cell;
use std::fmt;
use std::mem;

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::{self as hir, def::DefKind, def_id::DefId, definitions::DefPathData};
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{
    self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
};
use rustc_span::{Pos, Span};
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};

use super::{
    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, Operand, Place, PlaceTy,
    ScalarMaybeUninit, StackPopJump,
};
use crate::transform::validate::equal_up_to_regions;
use crate::util::storage::AlwaysLiveLocals;

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    ///
    /// Note: the stack is provided by the machine.
    pub machine: M,

    /// The results of the type checker, from rustc.
    /// The span in this is the "root" of the evaluation, i.e., the const
    /// we are evaluating (if this is CTFE).
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// A cache for deduplicating vtables.
    pub(super) vtables:
        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}

// The PhantomData exists to prevent this type from being `Send`. If it were sent across a thread
// boundary and dropped in the other thread, it would exit the span in the other thread.
struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);

impl SpanGuard {
    /// By default a `SpanGuard` does nothing.
    fn new() -> Self {
        Self(tracing::Span::none(), std::marker::PhantomData)
    }

    /// If a span is entered, we exit the previous span (if any, normally none) and enter the
    /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
    /// been pushed.
    fn enter(&mut self, span: tracing::Span) {
        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
        // we never enter more spans than we exit. Unless you `mem::forget` the guard, the
        // tracing stack stays balanced; leaking it just leads to weird logging, no actual
        // problems.
        *self = Self(span, std::marker::PhantomData);
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.enter(id);
        });
    }
}

impl Drop for SpanGuard {
    fn drop(&mut self) {
        self.0.with_subscriber(|(id, dispatch)| {
            dispatch.exit(id);
        });
    }
}
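
// `*const u8` is neither `Send` nor `Sync`, so the `PhantomData` above suppresses both auto
// traits for `SpanGuard`. A sketch of the property (hypothetical check, would fail to compile):
//
//     fn assert_send<T: Send>() {}
//     assert_send::<SpanGuard>(); // error: `*const u8` cannot be sent between threads safely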

/// A stack frame.
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
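
    // For illustration (a sketch, not normative): for `fn f(x: i32) -> i32 { let y = x; y }`,
    // this vector holds `_0` (the return place), `_1` (the argument `x`), and then `_2`, ...
    // for `y` and any compiler-introduced temporaries.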

    /// The `tracing` span for this frame is stored here.
    /// When the guard is dropped, the span is exited. This gives us
    /// a full stack trace on all tracing statements.
    tracing_span: SpanGuard,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// If this is `Err`, we are not currently executing any particular statement in
    /// this frame (can happen e.g. during frame initialization, and during unwinding on
    /// frames without cleanup code).
    /// We basically abuse `Result` as `Either`.
    pub(super) loc: Result<mir::Location, Span>,
}

/// What we store about a frame in an interpreter backtrace.
#[derive(Debug)]
pub struct FrameInfo<'tcx> {
    pub instance: ty::Instance<'tcx>,
    pub span: Span,
    pub lint_root: Option<hir::HirId>,
}

#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if None (that's a function
    /// that may never return). Also store layout of return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throw away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}
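
// Usage sketch (based on how the interpreter drives this type): evaluating a `Call`
// terminator pushes a frame with `StackPopCleanup::Goto { ret, unwind }` taken from the
// terminator's destination and cleanup blocks, while the root frame of a CTFE query is
// pushed with `StackPopCleanup::None { cleanup: false }` so its locals stay allocated for
// interning.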

/// State of a local variable including a memoized layout.
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = ()> {
    pub value: LocalValue<Tag>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    #[stable_hasher(ignore)]
    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
}

/// Current value of a local variable.
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag = ()> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to
    /// but not read from or its address taken. Locals get initialized on
    /// first write because for unsized locals, we do not know their size
    /// before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag>),
}

impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    /// Read the local's value, or error if the local is not yet live or not live anymore.
    ///
    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
    /// anywhere else. You may be invalidating machine invariants if you do!
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Uninitialized => {
                bug!("The type checker should prevent reading from a never-written local")
            }
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    ///
    /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
    /// anywhere else. You may be invalidating machine invariants if you do!
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
                Ok(Ok(local))
            }
        }
    }
}

impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> {
    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
        Frame {
            body: self.body,
            instance: self.instance,
            return_to_block: self.return_to_block,
            return_place: self.return_place,
            locals: self.locals,
            loc: self.loc,
            extra,
            tracing_span: self.tracing_span,
        }
    }
}

impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
        self.loc.ok().map(|loc| self.body.source_info(loc))
    }

    pub fn current_span(&self) -> Span {
        match self.loc {
            Ok(loc) => self.body.source_info(loc).span,
            Err(span) => span,
        }
    }
}

impl<'tcx> fmt::Display for FrameInfo<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        ty::tls::with(|tcx| {
            if tcx.def_key(self.instance.def_id()).disambiguated_data.data
                == DefPathData::ClosureExpr
            {
                write!(f, "inside closure")?;
            } else {
                write!(f, "inside `{}`", self.instance)?;
            }
            if !self.span.is_dummy() {
                let lo = tcx.sess.source_map().lookup_char_pos(self.span.lo());
                write!(f, " at {}:{}:{}", lo.file.name, lo.line, lo.col.to_usize() + 1)?;
            }
            Ok(())
        })
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>;

    #[inline]
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.tcx
            .layout_of(self.param_env.and(ty))
            .map_err(|layout| err_inval!(Layout(layout)).into())
    }
}

/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    src: TyAndLayout<'tcx>,
    dest: TyAndLayout<'tcx>,
) -> bool {
    // Type-changing assignments can happen when subtyping is used. While
    // all normal lifetimes are erased, higher-ranked types with their
    // late-bound lifetimes are still around and can lead to type
    // differences. So we compare ignoring lifetimes.
    if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
        // Make sure the layout is equal, too -- just to be safe. Miri really
        // needs layout equality. For performance reasons we skip this check when
        // the types are equal. Equal types *can* have different layouts when
        // enum downcast is involved (as enum variants carry the type of the
        // enum), but those should never occur in assignments.
        if cfg!(debug_assertions) || src.ty != dest.ty {
            assert_eq!(src.layout, dest.layout);
        }
        true
    } else {
        false
    }
}
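
// Example (sketch): coercing `for<'x> fn(&'x u32)` to `fn(&'static u32)` in an assignment
// changes only (late-bound) regions, so `equal_up_to_regions` accepts the pair even though
// the `Ty`s are not identical; both have the same function-pointer layout.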

/// Use the already known layout if given (but sanity check in debug mode),
/// or compute the layout.
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
    tcx: TyCtxtAt<'tcx>,
    param_env: ParamEnv<'tcx>,
    known_layout: Option<TyAndLayout<'tcx>>,
    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
    match known_layout {
        None => compute(),
        Some(known_layout) => {
            if cfg!(debug_assertions) {
                let check_layout = compute()?;
                if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
                    span_bug!(
                        tcx.span,
                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
                        known_layout.ty,
                        check_layout.ty,
                    );
                }
            }
            Ok(known_layout)
        }
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxt<'tcx>,
        root_span: Span,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx: tcx.at(root_span),
            param_env,
            memory: Memory::new(tcx, memory_extra),
            vtables: FxHashMap::default(),
        }
    }

    #[inline(always)]
    pub fn cur_span(&self) -> Span {
        self.stack().last().map(|f| f.current_span()).unwrap_or(self.tcx.span)
    }

    #[inline(always)]
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.force_ptr(scalar)
    }

    #[inline(always)]
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        self.memory.force_bits(scalar, size)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline(always)]
    pub fn global_base_pointer(&self, ptr: Pointer) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.global_base_pointer(ptr)
    }

    #[inline(always)]
    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        M::stack(self)
    }

    #[inline(always)]
    pub(crate) fn stack_mut(
        &mut self,
    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
        M::stack_mut(self)
    }

    #[inline(always)]
    pub fn frame_idx(&self) -> usize {
        let stack = self.stack();
        assert!(!stack.is_empty());
        stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack().last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack_mut().last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        ty.size.sign_extend(value)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        ty.size.truncate(value)
    }
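
    // Worked example (sketch): for an `i8` layout (`ty.size` is 1 byte),
    // `truncate(0x1234, ty)` keeps only the low 8 bits and yields 0x34, while
    // `sign_extend(0x80, ty)` reads 0x80 as -128 and widens it to 128 bits,
    // yielding 0xffff..ff80.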

    #[inline]
    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }

    #[inline]
    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(self.tcx, self.param_env)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        // do not continue if typeck errors occurred (can only occur in local crate)
        let def = instance.with_opt_param();
        if let Some(def) = def.as_local() {
            if self.tcx.has_typeck_results(def.did) {
                if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors {
                    throw_inval!(AlreadyReported(error_reported))
                }
            }
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(&self.tcx.promoted_mir_opt_const_arg(def)[promoted]);
        }
        match instance {
            ty::InstanceDef::Item(def) => {
                if self.tcx.is_mir_available(def.did) {
                    Ok(self.tcx.optimized_mir_opt_const_arg(def))
                } else {
                    throw_unsup!(NoMirFor(def.did))
                }
            }
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> T {
        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
    }

    /// Call this on things you got out of the MIR (so it is as generic as the provided
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        value: T,
    ) -> T {
        if let Some(substs) = frame.instance.substs_for_mir_body() {
            self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value)
        } else {
            self.tcx.normalize_erasing_regions(self.param_env, value)
        }
    }
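
    // For example (sketch): while interpreting a frame for `fn id<T>(x: T) -> T` that was
    // instantiated with `T = i32`, feeding the MIR-level type `T` through this function
    // applies the frame's substs and yields the concrete type `i32`.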

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def: ty::WithOptConstParam<DefId>,
        substs: SubstsRef<'tcx>,
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        match ty::Instance::resolve_opt_const_arg(*self.tcx, self.param_env, def, substs) {
            Ok(Some(instance)) => Ok(instance),
            Ok(None) => throw_inval!(TooGeneric),

            // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
            Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
        }
    }

    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = from_known_layout(self.tcx, self.param_env, layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty =
                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: MemPlaceMeta<M::PointerTag>,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind() {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use type_of::sizing_type_of because that expects `t` to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                assert!(layout.fields.count() > 0);
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty,
                    sized_size,
                    sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here, how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1)?;
                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
                    Some(size_and_align) => size_and_align,
                    None => {
                        // A field with extern type. If this field is at offset 0, we behave
                        // like the underlying extern type.
                        // FIXME: Once we have made decisions for how to handle size and alignment
                        // of `extern type`, this should be adapted. It is just a temporary hack
                        // to get some code to work that probably ought to work.
                        if sized_size == Size::ZERO {
                            return Ok(None);
                        } else {
                            span_bug!(
                                self.cur_span(),
                                "Fields cannot be extern types, unless they are at offset 0"
                            )
                        }
                    }
                };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `sized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size; // `Size` addition

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout.obj_size_bound() {
                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                }
                Ok(Some((size, align)))
            }
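
            // Worked example (sketch): for `struct S { a: u16, tail: [u64] }` with 3 tail
            // elements, the sized prefix ends at offset 8 (the offset of `tail`), the unsized
            // part is 24 bytes with align 8, so size = align_to(8 + 24, max(8, 8)) = 32 bytes.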

            ty::Dynamic(..) => {
                let vtable = metadata.unwrap_meta();
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.unwrap_meta().to_machine_usize(self)?;
                let elem = layout.field(self, 0)?;

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, self).ok_or_else(|| {
                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
                })?;
                Ok(Some((size, elem.align.abi)))
            }
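
            // For instance (sketch): a `[u32]` place with length metadata 5 yields
            // size = 5 * 4 = 20 bytes and align = 4.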

            ty::Foreign(_) => Ok(None),

            _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }

    #[inline]
    pub fn size_and_align_of_mplace(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(mplace.meta, mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        // first push a stack frame so we have access to the local substs
        let pre_frame = Frame {
            body,
            loc: Err(body.span), // Span used for errors caused during preamble.
            return_to_block,
            return_place,
            // empty local array, we fill it in below, after we are inside the stack frame and
            // all methods actually know about the frame
            locals: IndexVec::new(),
            instance,
            tracing_span: SpanGuard::new(),
            extra: (),
        };
        let frame = M::init_frame_extra(self, pre_frame)?;
        self.stack_mut().push(frame);

        // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
        for const_ in &body.required_consts {
            let span = const_.span;
            let const_ =
                self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal);
            self.const_to_op(const_, None).map_err(|err| {
                // If there was an error, set the span of the current frame to this constant.
                // Avoid doing this when evaluation succeeds.
                self.frame_mut().loc = Err(span);
                err
            })?;
        }

        // Locals are initially uninitialized.
        let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
        let mut locals = IndexVec::from_elem(dummy, &body.local_decls);

        // Now mark those locals as dead that we do not want to initialize
        match self.tcx.def_kind(instance.def_id()) {
            // statics and constants don't have `Storage*` statements, no need to look for them
            //
            // FIXME: The above is likely untrue. See
            // <https://github.com/rust-lang/rust/pull/70004#issuecomment-602022110>. Is it
            // okay to ignore `StorageDead`/`StorageLive` annotations during CTFE?
            DefKind::Static | DefKind::Const | DefKind::AssocConst => {}
            _ => {
                // Mark locals that use `Storage*` annotations as dead on function entry.
                let always_live = AlwaysLiveLocals::new(self.body());
                for local in locals.indices() {
                    if !always_live.contains(local) {
                        locals[local].value = LocalValue::Dead;
                    }
                }
            }
        }
        // done
        self.frame_mut().locals = locals;
        M::after_stack_push(self)?;
        self.frame_mut().loc = Ok(mir::Location::START);

        let span = info_span!("frame", "{}", instance);
        self.frame_mut().tracing_span.enter(span);

        Ok(())
    }

    /// Jump to the given block.
    #[inline]
    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
        self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
    }

    /// *Return* to the given `target` basic block.
    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
        if let Some(target) = target {
            self.go_to_block(target);
            Ok(())
        } else {
            throw_ub!(Unreachable)
        }
    }

    /// *Unwind* to the given `target` basic block.
    /// Do *not* use for returning! Use `return_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function does not need cleanup during
    /// unwinding, and we will just keep propagating that upwards.
    pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
        self.frame_mut().loc = match target {
            Some(block) => Ok(mir::Location { block, statement_index: 0 }),
            None => Err(self.frame_mut().body.span),
        };
    }

    /// Pops the current frame from the stack, deallocating the
    /// memory for allocated locals.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
        info!(
            "popping stack frame ({})",
            if unwinding { "during unwinding" } else { "returning from function" }
        );

        // Sanity check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().loc {
                Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
                Err(_) => true,
            }
        );

        if unwinding && self.frame_idx() == 0 {
            throw_ub_format!("unwinding past the topmost frame of the stack");
        }

        let frame =
            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");

        if !unwinding {
            // Copy the return value to the caller's stack frame.
            if let Some(return_place) = frame.return_place {
                let op = self.access_local(&frame, mir::RETURN_PLACE, None)?;
                self.copy_op_transmute(op, return_place)?;
                trace!("{:?}", self.dump_place(*return_place));
            } else {
                throw_ub!(Unreachable);
            }
        }

        // Now where do we jump next?

        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // In that case, we return early. We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        let (cleanup, next_block) = match frame.return_to_block {
            StackPopCleanup::Goto { ret, unwind } => {
                (true, Some(if unwinding { unwind } else { ret }))
            }
            StackPopCleanup::None { cleanup, .. } => (cleanup, None),
        };

        if !cleanup {
            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
            assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
            assert!(!unwinding, "tried to skip cleanup during unwinding");
            // Leak the locals, skip validation, skip machine hook.
            return Ok(());
        }

        // Cleanup: deallocate all locals that are backed by an allocation.
        for local in &frame.locals {
            self.deallocate_local(local.value)?;
        }

        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
            // The hook already did everything.
            // We want to skip the jump-to-next-block logic below, hence early return.
            return Ok(());
        }

        // Normal return, figure out where to jump.
        if unwinding {
            // Follow the unwind edge.
            let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
            self.unwind_to_block(unwind);
        } else {
            // Follow the normal return edge.
            if let Some(ret) = next_block {
                self.return_to_block(ret)?;
            }
        }

        Ok(())
    }

    /// Mark a storage as live, killing the previous content and returning it.
    /// Remember to deallocate that!
    pub fn storage_live(
        &mut self,
        local: mir::Local,
    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable already is live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
    }
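
    // For instance (sketch): executing `StorageLive(_2); StorageLive(_2);` is accepted; the
    // second `StorageLive` resets `_2` to `Uninitialized` and hands the previous value back
    // to the caller for deallocation.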

    /// Returns the old value of the local.
    /// Remember to deallocate that!
    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
    }

    pub(super) fn deallocate_local(
        &mut self,
        local: LocalValue<M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // FIXME: should we tell the user that there was a local which was never written to?
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            // All locals have a backing allocation, even if the allocation is empty
            // due to the local having ZST type.
            let ptr = ptr.assert_ptr();
            trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id));
            self.memory.deallocate_local(ptr)?;
        };
        Ok(())
    }

    pub fn eval_to_allocation(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        let val = self.tcx.eval_to_allocation_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    #[must_use]
    pub fn dump_place(&self, place: Place<M::PointerTag>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
        PlacePrinter { ecx: self, place }
    }

    #[must_use]
    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
        let mut frames = Vec::new();
        for frame in self.stack().iter().rev() {
            let lint_root = frame.current_source_info().and_then(|source_info| {
                match &frame.body.source_scopes[source_info.scope].local_data {
                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                    mir::ClearCrossCrate::Clear => None,
                }
            });
            let span = frame.current_span();

            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}", frames);
        frames
    }
}

/// Helper struct for the `dump_place` function.
pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    place: Place<M::PointerTag>,
}

impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
    for PlacePrinter<'a, 'mir, 'tcx, M>
{
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                write!(fmt, "{:?}", local)?;
                if frame != self.ecx.frame_idx() {
                    write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
                }
                write!(fmt, ":")?;

                match self.ecx.stack()[frame].locals[local].value {
                    LocalValue::Dead => write!(fmt, " is dead")?,
                    LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
                    LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
                        Scalar::Ptr(ptr) => {
                            write!(
                                fmt,
                                " by align({}){} ref:",
                                mplace.align.bytes(),
                                match mplace.meta {
                                    MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
                                    MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
                                }
                            )?;
                            allocs.push(ptr.alloc_id);
                        }
                        ptr => write!(fmt, " by integral ref: {:?}", ptr)?,
                    },
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(fmt, " {:?}", val)?;
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
            }
            Place::Ptr(mplace) => match mplace.ptr {
                Scalar::Ptr(ptr) => write!(
                    fmt,
                    "by align({}) ref: {:?}",
                    mplace.align.bytes(),
                    self.ecx.memory.dump_alloc(ptr.alloc_id)
                ),
                ptr => write!(fmt, " integral by ref: {:?}", ptr),
            },
        }
    }
}

impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
where
    Extra: HashStable<StableHashingContext<'ctx>>,
    Tag: HashStable<StableHashingContext<'ctx>>,
{
    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
        // Exhaustive match on fields to make sure we forget no field.
        let Frame {
            body,
            instance,
            return_to_block,
            return_place,
            locals,
            loc,
            extra,
            tracing_span: _,
        } = self;
        body.hash_stable(hcx, hasher);
        instance.hash_stable(hcx, hasher);
        return_to_block.hash_stable(hcx, hasher);
        return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
        locals.hash_stable(hcx, hasher);
        loc.hash_stable(hcx, hasher);
        extra.hash_stable(hcx, hasher);
    }
}