1 //! Global machine state as well as implementation of the interpreter engine
5 use std::cell::RefCell;
7 use std::num::NonZeroU64;
8 use std::time::Instant;
11 use rand::rngs::StdRng;
12 use rand::SeedableRng;
14 use rustc_data_structures::fx::FxHashMap;
19 layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout},
23 use rustc_span::def_id::DefId;
24 use rustc_span::symbol::{sym, Symbol};
25 use rustc_target::abi::Size;
26 use rustc_target::spec::abi::Abi;
30 // Some global facts about the emulated machine.
// NOTE(review): these are fixed facts Miri reports about the emulated target,
// not properties of the host machine.
31 pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture
32 pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations
33 pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever
// Presumably reported to the interpreted program when it queries CPU count —
// TODO(review): confirm at the shim that reads this.
34 pub const NUM_CPUS: u64 = 1;
36 /// Extra data stored with each stack frame
// NOTE(review): the struct's closing brace (original line 50) is elided from
// this excerpt.
37 pub struct FrameData<'tcx> {
38 /// Extra data for Stacked Borrows.
// A fresh id is allocated per frame in `init_frame_extra` below (via
// `new_call()`), so pushes and pops can be matched up.
39 pub call_id: stacked_borrows::CallId,
41 /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
42 /// called by `try`). When this frame is popped during unwinding a panic,
43 /// we stop unwinding, use the `CatchUnwindData` to handle catching.
44 pub catch_unwind: Option<CatchUnwindData<'tcx>>,
46 /// If `measureme` profiling is enabled, holds timing information
47 /// for the start of this frame. When we finish executing this frame,
48 /// we use this to register a completed event with `measureme`.
49 pub timing: Option<measureme::DetachedTiming>,
// Manual `Debug` impl because `measureme::DetachedTiming` does not implement
// `Debug` (see the comment inside), so `#[derive(Debug)]` is not possible.
// NOTE(review): the trailing `.finish()` call and the closing braces (original
// lines ~59-61) are elided from this excerpt.
52 impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
53 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
54 // Omitting `timing`, it does not support `Debug`.
// Exhaustive destructuring: adding a field to `FrameData` will make this
// pattern fail to compile, forcing this impl to be updated.
55 let FrameData { call_id, catch_unwind, timing: _ } = self;
56 f.debug_struct("FrameData")
57 .field("call_id", call_id)
58 .field("catch_unwind", catch_unwind)
63 /// Extra memory kinds
// NOTE(review): the variant identifiers themselves are elided from this
// excerpt; judging from the `MayLeak` and `Display` impls below they are
// `Rust`, `C`, `WinHeap`, `Machine`, `Env`, `Global`, `ExternStatic`, `Tls`
// (the doc comment for the C-heap variant is also elided) — confirm against
// the full file.
64 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
65 pub enum MiriMemoryKind {
66 /// `__rust_alloc` memory.
70 /// Windows `HeapAlloc` memory.
72 /// Memory for args, errno, and other parts of the machine-managed environment.
73 /// This memory may leak.
75 /// Memory for env vars. Separate from `Machine` because we clean it up and leak-check it.
77 /// Globals copied from `tcx`.
78 /// This memory may leak.
80 /// Memory for extern statics.
81 /// This memory may leak.
83 /// Memory for thread-local statics.
84 /// This memory may leak.
// Wrap a Miri-specific kind into the interpreter's generic `MemoryKind`.
// NOTE(review): idiomatic Rust prefers `impl From<MiriMemoryKind> for
// MemoryKind<MiriMemoryKind>` (the blanket impl then provides `Into` for
// free); this fragment is too elided (closing braces missing) to rewrite
// safely here.
88 impl Into<MemoryKind<MiriMemoryKind>> for MiriMemoryKind {
90 fn into(self) -> MemoryKind<MiriMemoryKind> {
91 MemoryKind::Machine(self)
// Which memory kinds are allowed to leak at program exit without the leak
// checker complaining. This matches the "This memory may leak" notes on the
// enum's variant docs above.
// NOTE(review): the `match self {` scrutinee line (original line 99) and the
// closing braces are elided from this excerpt.
95 impl MayLeak for MiriMemoryKind {
97 fn may_leak(self) -> bool {
98 use self::MiriMemoryKind::*;
100 Rust | C | WinHeap | Env => false,
101 Machine | Global | ExternStatic | Tls => true,
// Human-readable names for memory kinds, used in diagnostics (e.g. leak and
// UB reports).
// NOTE(review): the `match self {` line (original line 109) and the closing
// braces are elided from this excerpt.
106 impl fmt::Display for MiriMemoryKind {
107 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
108 use self::MiriMemoryKind::*;
110 Rust => write!(f, "Rust heap"),
111 C => write!(f, "C heap"),
112 WinHeap => write!(f, "Windows heap"),
113 Machine => write!(f, "machine-managed memory"),
114 Env => write!(f, "environment variable"),
115 Global => write!(f, "global (static or const)"),
116 ExternStatic => write!(f, "extern static"),
117 Tls => write!(f, "thread-local static"),
122 /// Pointer provenance (tag).
// NOTE(review): the struct header (`pub struct Tag {`, original line 124) and
// the Stacked Borrows field itself are elided from this excerpt; the
// `Provenance` impl below accesses it as `tag.sb` — confirm the field name
// and type against the full file.
123 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
125 pub alloc_id: AllocId,
126 /// Stacked Borrows tag.
130 impl Provenance for Tag {
131 /// We use absolute addresses in the `offset` of a `Pointer<Tag>`.
132 const OFFSET_IS_ADDR: bool = true;
134 /// We cannot err on partial overwrites, it happens too often in practice (due to unions).
135 const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = false;
// Prints a pointer as `0x<addr>[<alloc_id>]<sb-tag>`.
137 fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
138 let (tag, addr) = ptr.into_parts(); // address is absolute
139 write!(f, "0x{:x}", addr.bytes())?;
140 // Forward `alternate` flag to `alloc_id` printing.
// NOTE(review): the `if f.alternate() {` / `} else {` lines (original lines
// 141/143) are elided here — that is why both a `{:#?}` and a `{:?}` write
// of the same alloc_id appear back to back.
142 write!(f, "[{:#?}]", tag.alloc_id)?;
144 write!(f, "[{:?}]", tag.alloc_id)?;
146 // Print Stacked Borrows tag.
147 write!(f, "{:?}", tag.sb)
// NOTE(review): the body of `get_alloc_id` (presumably `self.alloc_id`,
// original line ~151) and the closing braces are elided from this excerpt.
150 fn get_alloc_id(self) -> AllocId {
155 /// Extra per-allocation data
// Both fields are `Option` because the corresponding checkers are opt-in;
// `init_allocation_extra` below only populates them when the respective
// global state exists in `MemoryExtra`.
// NOTE(review): the closing brace (original line ~163) is elided from this
// excerpt.
156 #[derive(Debug, Clone)]
157 pub struct AllocExtra {
158 /// Stacked Borrows state is only added if it is enabled.
159 pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
160 /// Data race detection via the use of a vector-clock,
161 /// this is only added if it is enabled.
162 pub data_race: Option<data_race::AllocExtra>,
165 /// Extra global memory data
// NOTE(review): a line between the doc comment and the struct header
// (original line 166, possibly a `#[derive(...)]`) and the closing brace are
// elided from this excerpt.
167 pub struct MemoryExtra {
// `None` disables the respective checker machine-wide; see `MiriConfig`
// handling in `new` below.
168 pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
169 pub data_race: Option<data_race::MemoryExtra>,
170 pub intptrcast: intptrcast::MemoryExtra,
172 /// Mapping extern static names to their base pointer.
// Populated by `init_extern_statics` / `add_extern_static`, read by
// `extern_static_base_pointer` in the `Machine` impl below.
173 extern_statics: FxHashMap<Symbol, Pointer<Tag>>,
175 /// The random number generator used for resolving non-determinism.
176 /// Needs to be queried by ptr_to_int, hence needs interior mutability.
177 pub(crate) rng: RefCell<StdRng>,
179 /// An allocation ID to report when it is being allocated
180 /// (helps for debugging memory leaks and use after free bugs).
181 tracked_alloc_id: Option<AllocId>,
183 /// Controls whether alignment of memory accesses is being checked.
184 pub(crate) check_alignment: AlignmentCheck,
186 /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
187 pub(crate) cmpxchg_weak_failure_rate: f64,
// Builds the global memory state from the command-line configuration.
// NOTE(review): the enclosing `impl MemoryExtra {` header, the end of the
// `stacked_borrows` initializer, the struct-literal opener, and the
// `stacked_borrows`/`data_race` fields of the literal (original lines
// ~190, 197-206, and the closers) are elided from this excerpt.
191 pub fn new(config: &MiriConfig) -> Self {
// Unseeded runs default to seed 0, so non-determinism resolution is
// reproducible by default.
192 let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
193 let stacked_borrows = if config.stacked_borrows {
194 Some(RefCell::new(stacked_borrows::GlobalState::new(
195 config.tracked_pointer_tag,
196 config.tracked_call_id,
203 if config.data_race_detector { Some(data_race::GlobalState::new()) } else { None };
207 intptrcast: Default::default(),
208 extern_statics: FxHashMap::default(),
209 rng: RefCell::new(rng),
210 tracked_alloc_id: config.tracked_alloc_id,
211 check_alignment: config.check_alignment,
212 cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
// Registers one extern static under its name in `extra.extern_statics`.
// NOTE(review): the name parameter (original line 218, presumably
// `name: &str`) and the closing brace are elided from this excerpt.
216 fn add_extern_static<'tcx, 'mir>(
217 this: &mut MiriEvalContext<'mir, 'tcx>,
219 ptr: Pointer<Option<Tag>>,
// `unwrap`: callers pass pointers freshly produced by `allocate`, which
// always carry provenance. `try_insert(..).unwrap()` additionally asserts
// that each name is registered at most once.
221 let ptr = ptr.into_pointer_or_addr().unwrap();
222 this.memory.extra.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
225 /// Sets up the "extern statics" for this machine.
// Dispatches on the target OS. NOTE(review): the match arm headers (original
// line 230 and ~245 — presumably `"linux" =>` and `"windows" =>` given the
// `__cxa_thread_atexit_impl`/`environ` vs. `_tls_used` bodies) and several
// closing lines are elided from this excerpt; confirm against the full file.
226 pub fn init_extern_statics<'tcx, 'mir>(
227 this: &mut MiriEvalContext<'mir, 'tcx>,
228 ) -> InterpResult<'tcx> {
229 match this.tcx.sess.target.os.as_str() {
231 // "__cxa_thread_atexit_impl"
232 // This should be all-zero, pointer-sized.
233 let layout = this.machine.layouts.usize;
234 let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
235 this.write_scalar(Scalar::from_machine_usize(0, this), &place.into())?;
236 Self::add_extern_static(this, "__cxa_thread_atexit_impl", place.ptr);
// Expose the env-var block; `unwrap` relies on `environ` having been
// initialized before this runs — TODO(review): confirm the init order.
238 Self::add_extern_static(
241 this.machine.env_vars.environ.unwrap().ptr,
246 // This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
247 let layout = this.machine.layouts.u8;
248 let place = this.allocate(layout, MiriMemoryKind::ExternStatic.into())?;
249 this.write_scalar(Scalar::from_u8(0), &place.into())?;
250 Self::add_extern_static(this, "_tls_used", place.ptr);
252 _ => {} // No "extern statics" supported on this target
258 /// Precomputed layouts of primitive types
// Computing a layout goes through `layout_of` (see `new` below); caching
// these common ones avoids repeating that query on hot paths.
// NOTE(review): the closing brace (original line ~267) is elided from this
// excerpt.
259 pub struct PrimitiveLayouts<'tcx> {
260 pub unit: TyAndLayout<'tcx>,
261 pub i8: TyAndLayout<'tcx>,
262 pub i32: TyAndLayout<'tcx>,
263 pub isize: TyAndLayout<'tcx>,
264 pub u8: TyAndLayout<'tcx>,
265 pub u32: TyAndLayout<'tcx>,
266 pub usize: TyAndLayout<'tcx>,
// Queries all cached layouts once, up front; any `LayoutError` is propagated
// to the caller (`Evaluator::new` unwraps it with an `expect`).
// NOTE(review): the `Ok(Self {` opener (original line 271) and the closing
// lines are elided from this excerpt.
269 impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> {
270 fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, LayoutError<'tcx>> {
272 unit: layout_cx.layout_of(layout_cx.tcx.mk_unit())?,
273 i8: layout_cx.layout_of(layout_cx.tcx.types.i8)?,
274 i32: layout_cx.layout_of(layout_cx.tcx.types.i32)?,
275 isize: layout_cx.layout_of(layout_cx.tcx.types.isize)?,
276 u8: layout_cx.layout_of(layout_cx.tcx.types.u8)?,
277 u32: layout_cx.layout_of(layout_cx.tcx.types.u32)?,
278 usize: layout_cx.layout_of(layout_cx.tcx.types.usize)?,
283 /// The machine itself.
// Per-machine (as opposed to per-memory) interpreter state. NOTE(review):
// several original lines are elided from this excerpt (e.g. 288, 295-296,
// 298, 343 — the closing brace); confirm the full field list against the
// complete file.
284 pub struct Evaluator<'mir, 'tcx> {
285 /// Environment variables set by `setenv`.
286 /// Miri does not expose env vars from the host to the emulated program.
287 pub(crate) env_vars: EnvVars<'tcx>,
289 /// Program arguments (`Option` because we can only initialize them after creating the ecx).
290 /// These are *pointers* to argc/argv because macOS.
291 /// We also need the full command line as one string because of Windows.
292 pub(crate) argc: Option<MemPlace<Tag>>,
293 pub(crate) argv: Option<MemPlace<Tag>>,
294 pub(crate) cmd_line: Option<MemPlace<Tag>>,
// Thread-local storage state (keys and per-thread values).
297 pub(crate) tls: TlsData<'tcx>,
299 /// What should Miri do when an op requires communicating with the host,
300 /// such as accessing host env vars, random number generation, and
301 /// file system access.
302 pub(crate) isolated_op: IsolatedOp,
304 /// Whether to enforce the validity invariant.
305 pub(crate) validate: bool,
307 /// Whether to enforce validity (e.g., initialization) of integers and floats.
308 pub(crate) enforce_number_validity: bool,
310 /// Whether to enforce [ABI](Abi) of function calls.
311 pub(crate) enforce_abi: bool,
// Emulated file-descriptor and directory-handle tables for the POSIX shims.
313 pub(crate) file_handler: shims::posix::FileHandler,
314 pub(crate) dir_handler: shims::posix::DirHandler,
316 /// The "time anchor" for this machine's monotone clock (for `Instant` simulation).
317 pub(crate) time_anchor: Instant,
319 /// The set of threads.
320 pub(crate) threads: ThreadManager<'mir, 'tcx>,
322 /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
323 pub(crate) layouts: PrimitiveLayouts<'tcx>,
325 /// Allocations that are considered roots of static memory (that may leak).
326 pub(crate) static_roots: Vec<AllocId>,
328 /// The `measureme` profiler used to record timing information about
329 /// the emulated program.
330 profiler: Option<measureme::Profiler>,
331 /// Used with `profiler` to cache the `StringId`s for event names
332 /// used with `measureme`.
333 string_cache: FxHashMap<String, measureme::StringId>,
335 /// Cache of `Instance` exported under the given `Symbol` name.
336 /// `None` means no `Instance` exported under the given name is found.
337 pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,
339 /// Whether to raise a panic in the context of the evaluated process when unsupported
340 /// functionality is encountered. If `false`, an error is propagated in the Miri application context
341 /// instead (default behavior)
342 pub(crate) panic_on_unsupported: bool,
// Constructs the machine from the command-line configuration; fields that
// need a live evaluation context (env vars, argc/argv) start out empty and
// are filled in later.
// NOTE(review): several original lines are elided from this excerpt — the
// `let layouts =` binding (line 347), the `Evaluator {` literal opener
// (line 352), the argc/argv/cmd_line initializers (lines 356-358), the
// `profiler` field init (line ~370), and the closers.
345 impl<'mir, 'tcx> Evaluator<'mir, 'tcx> {
346 pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Self {
348 PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
349 let profiler = config.measureme_out.as_ref().map(|out| {
350 measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
353 // `env_vars` could be initialized properly here if `Memory` were available before
354 // calling this method.
355 env_vars: EnvVars::default(),
359 tls: TlsData::default(),
360 isolated_op: config.isolated_op,
361 validate: config.validate,
362 enforce_number_validity: config.check_number_validity,
363 enforce_abi: config.check_abi,
364 file_handler: Default::default(),
365 dir_handler: Default::default(),
366 time_anchor: Instant::now(),
368 threads: ThreadManager::default(),
369 static_roots: Vec::new(),
371 string_cache: Default::default(),
372 exported_symbols_cache: FxHashMap::default(),
373 panic_on_unsupported: config.panic_on_unsupported,
// Returns `true` when ops that communicate with the host (env vars, file
// system, ...) are allowed, i.e. isolation is disabled.
// NOTE(review): the closing braces are elided from this excerpt.
377 pub(crate) fn communicate(&self) -> bool {
378 self.isolated_op == IsolatedOp::Allow
382 /// A rustc InterpCx for Miri.
// Shorthand so the rest of the crate does not have to spell out the full
// generic instantiation.
383 pub type MiriEvalContext<'mir, 'tcx> = InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>;
385 /// A little trait that's useful to be inherited by extension traits.
// Extension traits bound on this get uniform access to the underlying
// `InterpCx`, by shared or mutable reference.
// NOTE(review): the trait's closing brace (original line ~389) is elided
// from this excerpt.
386 pub trait MiriEvalContextExt<'mir, 'tcx> {
387 fn eval_context_ref<'a>(&'a self) -> &'a MiriEvalContext<'mir, 'tcx>;
388 fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriEvalContext<'mir, 'tcx>;
// Trivial identity impl for the context type itself.
// NOTE(review): the method bodies (presumably just `self`), any `#[inline]`
// attributes, and the closing braces (original lines 391, 393-395, 397-399)
// are elided from this excerpt.
390 impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {
392 fn eval_context_ref(&self) -> &MiriEvalContext<'mir, 'tcx> {
396 fn eval_context_mut(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
401 /// Machine hook implementations.
// This is the main integration point with rustc's interpreter: the
// `Machine` trait supplies all the hooks the generic `InterpCx` engine calls
// back into.
402 impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
403 type MemoryKind = MiriMemoryKind;
405 type FrameExtra = FrameData<'tcx>;
406 type MemoryExtra = MemoryExtra;
407 type AllocExtra = AllocExtra;
408 type PointerTag = Tag;
409 type ExtraFnVal = Dlsym;
// NOTE(review): the `type MemoryMap =` line (original line 411) is elided
// from this excerpt; the following line is its right-hand side.
412 MonoHashMap<AllocId, (MemoryKind<MiriMemoryKind>, Allocation<Tag, Self::AllocExtra>)>;
414 const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);
// Allocation failure is reported as an error, not a Miri crash.
416 const PANIC_ON_ALLOC_FAIL: bool = false;
// The following hooks forward the user-configurable checks to the engine.
// NOTE(review): `#[inline(always)]` attributes, closing braces, and the body
// of `enforce_validity` (original lines ~430-431, presumably
// `ecx.machine.validate`) are elided from this excerpt.
419 fn enforce_alignment(memory_extra: &MemoryExtra) -> bool {
420 memory_extra.check_alignment != AlignmentCheck::None
// "Int" mode forces the alignment check to work on integer addresses
// rather than symbolic alignment.
424 fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool {
425 memory_extra.check_alignment == AlignmentCheck::Int
429 fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
434 fn enforce_number_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
435 ecx.machine.enforce_number_validity
439 fn enforce_abi(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
440 ecx.machine.enforce_abi
// Called by the engine to resolve a function call: either returns MIR to
// execute or handles the call as a shim. Delegates to the inherent helper of
// the same name (defined elsewhere in the crate).
// NOTE(review): the `abi: Abi,` parameter (original line 447, evidenced by
// the `abi` argument forwarded below) and the closing braces are elided.
444 fn find_mir_or_eval_fn(
445 ecx: &mut InterpCx<'mir, 'tcx, Self>,
446 instance: ty::Instance<'tcx>,
448 args: &[OpTy<'tcx, Tag>],
449 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
450 unwind: StackPopUnwind,
451 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
452 ecx.find_mir_or_eval_fn(instance, abi, args, ret, unwind)
// Two more call hooks, both heavily elided in this excerpt.
// NOTE(review): the first fragment forwards to `ecx.call_dlsym(...)`, so it
// is presumably `fn call_extra_fn` with its `fn_val: Dlsym` and `abi: Abi`
// parameters elided (original lines ~455-459). `_unwind` is unused: dlsym
// shims do not unwind.
457 ecx: &mut InterpCx<'mir, 'tcx, Self>,
460 args: &[OpTy<'tcx, Tag>],
461 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
462 _unwind: StackPopUnwind,
463 ) -> InterpResult<'tcx> {
464 ecx.call_dlsym(fn_val, abi, args, ret)
// NOTE(review): this second fragment forwards to `ecx.call_intrinsic(...)`,
// so it is presumably `fn call_intrinsic` with its header (original lines
// ~467-468) elided.
469 ecx: &mut rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
470 instance: ty::Instance<'tcx>,
471 args: &[OpTy<'tcx, Tag>],
472 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
473 unwind: StackPopUnwind,
474 ) -> InterpResult<'tcx> {
475 ecx.call_intrinsic(instance, args, ret, unwind)
// Hook for MIR `Assert` terminators; delegates to the panic machinery.
// NOTE(review): the `fn assert_panic(` header line (original line ~479) is
// elided from this excerpt.
480 ecx: &mut InterpCx<'mir, 'tcx, Self>,
481 msg: &mir::AssertMessage<'tcx>,
482 unwind: Option<mir::BasicBlock>,
483 ) -> InterpResult<'tcx> {
484 ecx.assert_panic(msg, unwind)
// `abort` stops the whole interpretation with a machine-stop error; the `!`
// return type reflects that it never returns normally.
488 fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
489 throw_machine_stop!(TerminationInfo::Abort(msg))
// Hook for binary operations involving pointers (comparisons, offsets, ...);
// delegates to the inherent `binary_ptr_op` helper.
// NOTE(review): the `fn binary_ptr_op(` header and the `bin_op` parameter
// (original lines ~492-495) are elided from this excerpt, as evidenced by
// the `bin_op` argument forwarded below.
494 ecx: &rustc_const_eval::interpret::InterpCx<'mir, 'tcx, Self>,
496 left: &ImmTy<'tcx, Tag>,
497 right: &ImmTy<'tcx, Tag>,
498 ) -> InterpResult<'tcx, (Scalar<Tag>, bool, ty::Ty<'tcx>)> {
499 ecx.binary_ptr_op(bin_op, left, right)
// Hook for `box` allocation: instead of allocating directly, pushes a call
// to the `exchange_malloc` lang item so the program's own allocator runs.
// NOTE(review): the `fn box_alloc(` header, the `ecx.call_function(...)`
// line that consumes `malloc`/size/align, the destination argument, and the
// closing lines are elided from this excerpt.
503 ecx: &mut InterpCx<'mir, 'tcx, Self>,
504 dest: &PlaceTy<'tcx, Tag>,
505 ) -> InterpResult<'tcx> {
506 trace!("box_alloc for {:?}", dest.layout.ty);
// The pointee layout determines the size/align arguments below.
507 let layout = ecx.layout_of(dest.layout.ty.builtin_deref(false).unwrap().ty)?;
508 // First argument: `size`.
509 // (`0` is allowed here -- this is expected to be handled by the lang item).
510 let size = Scalar::from_machine_usize(layout.size.bytes(), ecx);
512 // Second argument: `align`.
513 let align = Scalar::from_machine_usize(layout.align.abi.bytes(), ecx);
515 // Call the `exchange_malloc` lang item.
516 let malloc = ecx.tcx.lang_items().exchange_malloc_fn().unwrap();
517 let malloc = ty::Instance::mono(ecx.tcx.tcx, malloc);
521 &[size.into(), align.into()],
523 // Don't do anything when we are done. The `statement()` function will increment
524 // the old stack frame's stmt counter to the next statement, which means that when
525 // `exchange_malloc` returns, we go on evaluating exactly where we want to be.
526 StackPopCleanup::None { cleanup: true },
// Resolves the base pointer of a thread-local static for the current thread,
// creating the per-thread allocation on first access.
// NOTE(review): the `def_id: DefId` parameter (original line ~533) and the
// closing braces are elided from this excerpt.
531 fn thread_local_static_base_pointer(
532 ecx: &mut InterpCx<'mir, 'tcx, Self>,
534 ) -> InterpResult<'tcx, Pointer<Tag>> {
535 ecx.get_or_create_thread_local_alloc(def_id)
// Resolves an `extern` static by its link name against the table populated
// in `init_extern_statics` above; unknown names are an unsupported-feature
// error, not UB.
// NOTE(review): the `def_id: DefId` parameter, the `Some(name) =>` match arm
// and the `Ok(ptr)` success path (original lines ~540, 544, 548) are elided.
538 fn extern_static_base_pointer(
539 memory: &Memory<'mir, 'tcx, Self>,
541 ) -> InterpResult<'tcx, Pointer<Tag>> {
// `#[link_name = "..."]` overrides the item name for lookup purposes.
542 let attrs = memory.tcx.get_attrs(def_id);
543 let link_name = match memory.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
545 None => memory.tcx.item_name(def_id),
547 if let Some(&ptr) = memory.extra.extern_statics.get(&link_name) {
550 throw_unsup_format!("`extern` static {:?} is not supported by Miri", def_id)
// Attaches Miri's per-allocation extra state (Stacked Borrows stacks, data
// race clocks) to a freshly interned allocation and tags its pointers.
// NOTE(review): the `id: AllocId` parameter (original line ~556), the
// `} else { None }` halves of both conditionals, and the trailing
// `Cow::Owned(alloc)` return (original lines ~568-570, 573-575, 580-582)
// are elided from this excerpt.
554 fn init_allocation_extra<'b>(
555 mem: &Memory<'mir, 'tcx, Self>,
557 alloc: Cow<'b, Allocation>,
558 kind: Option<MemoryKind<Self::MemoryKind>>,
559 ) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>> {
// Emit the `-Zmiri-track-alloc-id` diagnostic for the tracked allocation.
560 if Some(id) == mem.extra.tracked_alloc_id {
561 register_diagnostic(NonHaltingDiagnostic::CreatedAlloc(id));
564 let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
565 let alloc = alloc.into_owned();
566 let stacks = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
567 Some(Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind))
571 let race_alloc = if let Some(data_race) = &mem.extra.data_race {
572 Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size(), kind))
// Re-tags every pointer stored inside the allocation via the base-pointer
// hook below.
576 let alloc: Allocation<Tag, Self::AllocExtra> = alloc.convert_tag_add_extra(
578 AllocExtra { stacked_borrows: stacks, data_race: race_alloc },
579 |ptr| Evaluator::tag_alloc_base_pointer(mem, ptr),
// Turns an `AllocId`-based pointer into a full Miri pointer: an absolute
// integer address (via intptrcast) plus a Stacked Borrows base tag.
// NOTE(review): the return-type line (original line ~587) and the
// `} else { SbTag::Untagged }`-style fallback of the conditional (original
// lines ~591-593) are elided from this excerpt.
584 fn tag_alloc_base_pointer(
585 mem: &Memory<'mir, 'tcx, Self>,
586 ptr: Pointer<AllocId>,
588 let absolute_addr = intptrcast::GlobalState::rel_ptr_to_addr(&mem, ptr);
589 let sb_tag = if let Some(stacked_borrows) = &mem.extra.stacked_borrows {
590 stacked_borrows.borrow_mut().base_tag(ptr.provenance)
594 Pointer::new(Tag { alloc_id: ptr.provenance, sb: sb_tag }, Size::from_bytes(absolute_addr))
// Inverse direction: interprets an integer address as a (possibly untagged)
// pointer. NOTE(review): the `fn ptr_from_addr(` header and the `addr`
// parameter (original lines ~597-600) are elided from this excerpt.
599 mem: &Memory<'mir, 'tcx, Self>,
601 ) -> Pointer<Option<Self::PointerTag>> {
602 intptrcast::GlobalState::ptr_from_addr(addr, mem)
605 /// Convert a pointer with provenance into an allocation-offset pair,
606 /// or a `None` with an absolute address if that conversion is not possible.
// NOTE(review): the `fn ptr_get_alloc(` header line (original line ~607) and
// the closing braces are elided from this excerpt; the doc comment above
// suggests the real return type involves an `Option`, which the visible
// `(AllocId, Size)` signature line does not show — confirm against the full
// file.
608 mem: &Memory<'mir, 'tcx, Self>,
609 ptr: Pointer<Self::PointerTag>,
610 ) -> (AllocId, Size) {
// intptrcast translates the absolute address back to an offset relative
// to the allocation named by the tag.
611 let rel = intptrcast::GlobalState::abs_ptr_to_rel(mem, ptr);
612 (ptr.provenance.alloc_id, rel)
// Per-access hooks: forward every read/write to the data race detector and
// Stacked Borrows, when enabled. Data race check runs first in both hooks.
// NOTE(review): the `fn memory_read(`/`fn memory_written(` headers, the
// `tag`/`range` parameters, and the Stacked Borrows argument lists and
// closing lines (original lines ~615-658) are elided from this excerpt —
// `tag` and `range` are evidenced by their uses below.
617 memory_extra: &Self::MemoryExtra,
618 alloc_extra: &AllocExtra,
621 ) -> InterpResult<'tcx> {
622 if let Some(data_race) = &alloc_extra.data_race {
// `unwrap`: per-allocation state exists only when the global state does.
623 data_race.read(tag.alloc_id, range, memory_extra.data_race.as_ref().unwrap())?;
625 if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
626 stacked_borrows.memory_read(
630 memory_extra.stacked_borrows.as_ref().unwrap(),
// Write hook: same structure as the read hook, with mutable state.
639 memory_extra: &mut Self::MemoryExtra,
640 alloc_extra: &mut AllocExtra,
643 ) -> InterpResult<'tcx> {
644 if let Some(data_race) = &mut alloc_extra.data_race {
645 data_race.write(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
647 if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
648 stacked_borrows.memory_written(
652 memory_extra.stacked_borrows.as_mut().unwrap(),
// Deallocation hook: reports the tracked-alloc diagnostic, then notifies the
// data race detector and Stacked Borrows, mirroring the read/write hooks.
// NOTE(review): the `tag`/`range` parameters (original lines ~663-664,
// evidenced by their uses below) and the Stacked Borrows argument list and
// closing lines are elided from this excerpt.
660 fn memory_deallocated(
661 memory_extra: &mut Self::MemoryExtra,
662 alloc_extra: &mut AllocExtra,
665 ) -> InterpResult<'tcx> {
// Counterpart of the `CreatedAlloc` diagnostic in `init_allocation_extra`.
666 if Some(tag.alloc_id) == memory_extra.tracked_alloc_id {
667 register_diagnostic(NonHaltingDiagnostic::FreedAlloc(tag.alloc_id));
669 if let Some(data_race) = &mut alloc_extra.data_race {
670 data_race.deallocate(tag.alloc_id, range, memory_extra.data_race.as_mut().unwrap())?;
672 if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
673 stacked_borrows.memory_deallocated(
677 memory_extra.stacked_borrows.as_mut().unwrap(),
// Retagging hook for the `Retag` MIR statement: a no-op unless Stacked
// Borrows is enabled.
// NOTE(review): the `fn retag(` header line (original line ~685) is elided
// from this excerpt.
686 ecx: &mut InterpCx<'mir, 'tcx, Self>,
687 kind: mir::RetagKind,
688 place: &PlaceTy<'tcx, Tag>,
689 ) -> InterpResult<'tcx> {
690 if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
// Builds the `FrameData` attached to every pushed stack frame: starts a
// `measureme` timing interval (if profiling) and allocates a Stacked Borrows
// call id.
// NOTE(review): the `fn init_frame_extra(` header line (original line ~694)
// and a few interior lines (e.g. the argument before the EventId at ~705)
// are elided from this excerpt.
695 ecx: &mut InterpCx<'mir, 'tcx, Self>,
696 frame: Frame<'mir, 'tcx, Tag>,
697 ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
698 // Start recording our event before doing anything else
699 let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
700 let fn_name = frame.instance.to_string();
// Cache the interned StringId so repeated calls to the same function
// don't re-allocate the name in the profiler.
701 let entry = ecx.machine.string_cache.entry(fn_name.clone());
702 let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
704 Some(profiler.start_recording_interval_event_detached(
706 measureme::EventId::from_label(*name),
707 ecx.get_active_thread().to_u32(),
// When Stacked Borrows is disabled, use the fixed dummy call id 1; it is
// never inspected in that case.
713 let stacked_borrows = ecx.memory.extra.stacked_borrows.as_ref();
714 let call_id = stacked_borrows.map_or(NonZeroU64::new(1).unwrap(), |stacked_borrows| {
715 stacked_borrows.borrow_mut().new_call()
718 let extra = FrameData { call_id, catch_unwind: None, timing };
719 Ok(frame.with_extra(extra))
// The engine asks the machine for the call stack so Miri can keep one stack
// per thread: both accessors delegate to the active thread's stack.
// NOTE(review): the `fn stack(`/`fn stack_mut(` header lines (original lines
// ~721-722, ~727-728) and closing braces are elided from this excerpt.
723 ecx: &'a InterpCx<'mir, 'tcx, Self>,
724 ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
725 ecx.active_thread_stack()
729 ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
730 ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
731 ecx.active_thread_stack_mut()
// After a frame is pushed, retag its return place (Stacked Borrows only).
735 fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
736 if ecx.memory.extra.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
741 ecx: &mut InterpCx<'mir, 'tcx, Self>,
742 mut frame: Frame<'mir, 'tcx, Tag, FrameData<'tcx>>,
744 ) -> InterpResult<'tcx, StackPopJump> {
745 let timing = frame.extra.timing.take();
746 let res = ecx.handle_stack_pop(frame.extra, unwinding);
747 if let Some(profiler) = ecx.machine.profiler.as_ref() {
748 profiler.finish_recording_interval_event(timing.unwrap());