1 //! Global machine state as well as implementation of the interpreter engine
5 use std::cell::RefCell;
8 use rand::rngs::StdRng;
11 use rustc_ast::ast::Mutability;
12 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
14 use rustc_data_structures::static_assert_size;
19 layout::{LayoutCx, LayoutError, LayoutOf, TyAndLayout},
20 Instance, Ty, TyCtxt, TypeAndMut,
23 use rustc_span::def_id::{CrateNum, DefId};
24 use rustc_span::Symbol;
25 use rustc_target::abi::Size;
26 use rustc_target::spec::abi::Abi;
29 concurrency::{data_race, weak_memory},
30 shims::unix::FileHandler,
34 // Some global facts about the emulated machine.
35 pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture
36 pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations
37 pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever
39 /// Extra data stored with each stack frame
40 pub struct FrameData<'tcx> {
41 /// Extra data for Stacked Borrows.
42 pub stacked_borrows: Option<stacked_borrows::FrameExtra>,
44 /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
45 /// called by `try`). When this frame is popped during unwinding a panic,
46 /// we stop unwinding, use the `CatchUnwindData` to handle catching.
47 pub catch_unwind: Option<CatchUnwindData<'tcx>>,
49 /// If `measureme` profiling is enabled, holds timing information
50 /// for the start of this frame. When we finish executing this frame,
51 /// we use this to register a completed event with `measureme`.
52 pub timing: Option<measureme::DetachedTiming>,
54 /// Indicates whether a `Frame` is part of a workspace-local crate and is also not
55 /// `#[track_caller]`. We compute this once on creation and store the result, as an
/// optimization.
57 /// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`.
58 pub is_user_relevant: bool,
61 impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
62 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
63 // Omitting `timing`, it does not support `Debug`.
64 let FrameData { stacked_borrows, catch_unwind, timing: _, is_user_relevant: _ } = self;
65 f.debug_struct("FrameData")
66 .field("stacked_borrows", stacked_borrows)
67 .field("catch_unwind", catch_unwind)
72 impl VisitTags for FrameData<'_> {
73 fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
// Exhaustive destructuring: adding a field to `FrameData` is a compile error
// here until we decide whether the new field needs to be visited.
// `timing` and `is_user_relevant` carry no tags, hence the `_` bindings.
74 let FrameData { catch_unwind, stacked_borrows, timing: _, is_user_relevant: _ } = self;
76 catch_unwind.visit_tags(visit);
77 stacked_borrows.visit_tags(visit);
81 /// Extra memory kinds
82 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
83 pub enum MiriMemoryKind {
84 /// `__rust_alloc` memory.
86 /// `miri_alloc` memory.
90 /// Windows `HeapAlloc` memory.
92 /// Memory for args, errno, and other parts of the machine-managed environment.
93 /// This memory may leak.
95 /// Memory allocated by the runtime (e.g. env vars). Separate from `Machine`
96 /// because we clean it up and leak-check it.
98 /// Globals copied from `tcx`.
99 /// This memory may leak.
101 /// Memory for extern statics.
102 /// This memory may leak.
104 /// Memory for thread-local statics.
105 /// This memory may leak.
109 impl From<MiriMemoryKind> for MemoryKind<MiriMemoryKind> {
// Miri's memory kinds are the machine-specific variant of the interpreter's `MemoryKind`.
111 fn from(kind: MiriMemoryKind) -> MemoryKind<MiriMemoryKind> {
112 MemoryKind::Machine(kind)
116 impl MayLeak for MiriMemoryKind {
// Whether leaving this kind of memory allocated at program exit counts as a leak.
// Matches the "This memory may leak" notes on the enum variants above: heap-like
// kinds must be freed, machine-managed/global/static kinds may leak.
118 fn may_leak(self) -> bool {
119 use self::MiriMemoryKind::*;
121 Rust | Miri | C | WinHeap | Runtime => false,
122 Machine | Global | ExternStatic | Tls => true,
127 impl fmt::Display for MiriMemoryKind {
128 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
129 use self::MiriMemoryKind::*;
131 Rust => write!(f, "Rust heap"),
132 Miri => write!(f, "Miri bare-metal heap"),
133 C => write!(f, "C heap"),
134 WinHeap => write!(f, "Windows heap"),
135 Machine => write!(f, "machine-managed memory"),
136 Runtime => write!(f, "language runtime memory"),
137 Global => write!(f, "global (static or const)"),
138 ExternStatic => write!(f, "extern static"),
139 Tls => write!(f, "thread-local static"),
144 /// Pointer provenance.
145 #[derive(Clone, Copy)]
146 pub enum Provenance {
149 /// Stacked Borrows tag.
155 // This needs to be `Eq`+`Hash` because the `Machine` trait needs that because validity checking
156 // *might* be recursive and then it has to track which places have already been visited.
157 // However, comparing provenance is meaningless, since `Wildcard` might be any provenance -- and of
158 // course we don't actually do recursive checking.
159 // We could change `RefTracking` to strip provenance for its `seen` set but that type is generic so that is quite annoying.
160 // Instead we add the required instances but make them panic.
161 impl PartialEq for Provenance {
// Required by the `Machine` trait bounds (see the comment above this impl),
// but intentionally unusable: comparing provenance is meaningless.
162 fn eq(&self, _other: &Self) -> bool {
163 panic!("Provenance must not be compared")
// `Eq` is needed alongside `PartialEq` for the `Machine` trait bounds; since `eq` panics,
// this marker impl is never meaningfully exercised.
166 impl Eq for Provenance {}
167 impl std::hash::Hash for Provenance {
// Same story as `PartialEq` above: required by trait bounds, deliberately a panic.
168 fn hash<H: std::hash::Hasher>(&self, _state: &mut H) {
169 panic!("Provenance must not be hashed")
173 /// The "extra" information a pointer has over a regular AllocId.
174 #[derive(Copy, Clone, PartialEq)]
175 pub enum ProvenanceExtra {
180 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
181 static_assert_size!(Pointer<Provenance>, 24);
182 // FIXME: this would fit in 24 bytes but layout optimizations are not smart enough
183 // #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
184 //static_assert_size!(Pointer<Option<Provenance>>, 24);
185 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
186 static_assert_size!(Scalar<Provenance>, 32);
188 impl fmt::Debug for Provenance {
189 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
191 Provenance::Concrete { alloc_id, sb } => {
192 // Forward `alternate` flag to `alloc_id` printing.
194 write!(f, "[{alloc_id:#?}]")?;
196 write!(f, "[{alloc_id:?}]")?;
198 // Print Stacked Borrows tag.
199 write!(f, "{sb:?}")?;
201 Provenance::Wildcard => {
202 write!(f, "[wildcard]")?;
209 impl interpret::Provenance for Provenance {
210 /// We use absolute addresses in the `offset` of a `Pointer<Provenance>`.
211 const OFFSET_IS_ADDR: bool = true;
213 fn get_alloc_id(self) -> Option<AllocId> {
215 Provenance::Concrete { alloc_id, .. } => Some(alloc_id),
216 Provenance::Wildcard => None,
220 fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
221 match (left, right) {
222 // If both are the *same* concrete tag, that is the result.
224 Some(Provenance::Concrete { alloc_id: left_alloc, sb: left_sb }),
225 Some(Provenance::Concrete { alloc_id: right_alloc, sb: right_sb }),
226 ) if left_alloc == right_alloc && left_sb == right_sb => left,
227 // If one side is a wildcard, the best possible outcome is that it is equal to the other
228 // one, and we use that.
229 (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
230 // Otherwise, fall back to `None`.
236 impl fmt::Debug for ProvenanceExtra {
237 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
239 ProvenanceExtra::Concrete(pid) => write!(f, "{pid:?}"),
240 ProvenanceExtra::Wildcard => write!(f, "<wildcard>"),
245 impl ProvenanceExtra {
/// Apply `f` to the concrete Stacked Borrows tag, if any.
/// A `Wildcard` carries no tag and always yields `None`.
246 pub fn and_then<T>(self, f: impl FnOnce(SbTag) -> Option<T>) -> Option<T> {
248 ProvenanceExtra::Concrete(pid) => f(pid),
249 ProvenanceExtra::Wildcard => None,
254 /// Extra per-allocation data
255 #[derive(Debug, Clone)]
256 pub struct AllocExtra {
257 /// Stacked Borrows state is only added if it is enabled.
258 pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
259 /// Data race detection via the use of a vector-clock,
260 /// this is only added if it is enabled.
261 pub data_race: Option<data_race::AllocExtra>,
262 /// Weak memory emulation via the use of store buffers,
263 /// this is only added if it is enabled.
264 pub weak_memory: Option<weak_memory::AllocExtra>,
267 impl VisitTags for AllocExtra {
268 fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
// Exhaustive destructuring: a new `AllocExtra` field is a compile error here
// until it is explicitly visited (or explicitly ignored).
269 let AllocExtra { stacked_borrows, data_race, weak_memory } = self;
271 stacked_borrows.visit_tags(visit);
272 data_race.visit_tags(visit);
273 weak_memory.visit_tags(visit);
277 /// Precomputed layouts of primitive types
278 pub struct PrimitiveLayouts<'tcx> {
279 pub unit: TyAndLayout<'tcx>,
280 pub i8: TyAndLayout<'tcx>,
281 pub i16: TyAndLayout<'tcx>,
282 pub i32: TyAndLayout<'tcx>,
283 pub i64: TyAndLayout<'tcx>,
284 pub i128: TyAndLayout<'tcx>,
285 pub isize: TyAndLayout<'tcx>,
286 pub u8: TyAndLayout<'tcx>,
287 pub u16: TyAndLayout<'tcx>,
288 pub u32: TyAndLayout<'tcx>,
289 pub u64: TyAndLayout<'tcx>,
290 pub u128: TyAndLayout<'tcx>,
291 pub usize: TyAndLayout<'tcx>,
292 pub bool: TyAndLayout<'tcx>,
293 pub mut_raw_ptr: TyAndLayout<'tcx>, // *mut ()
294 pub const_raw_ptr: TyAndLayout<'tcx>, // *const ()
297 impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> {
298 fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result<Self, LayoutError<'tcx>> {
299 let tcx = layout_cx.tcx;
300 let mut_raw_ptr = tcx.mk_ptr(TypeAndMut { ty: tcx.types.unit, mutbl: Mutability::Mut });
301 let const_raw_ptr = tcx.mk_ptr(TypeAndMut { ty: tcx.types.unit, mutbl: Mutability::Not });
303 unit: layout_cx.layout_of(tcx.mk_unit())?,
304 i8: layout_cx.layout_of(tcx.types.i8)?,
305 i16: layout_cx.layout_of(tcx.types.i16)?,
306 i32: layout_cx.layout_of(tcx.types.i32)?,
307 i64: layout_cx.layout_of(tcx.types.i64)?,
308 i128: layout_cx.layout_of(tcx.types.i128)?,
309 isize: layout_cx.layout_of(tcx.types.isize)?,
310 u8: layout_cx.layout_of(tcx.types.u8)?,
311 u16: layout_cx.layout_of(tcx.types.u16)?,
312 u32: layout_cx.layout_of(tcx.types.u32)?,
313 u64: layout_cx.layout_of(tcx.types.u64)?,
314 u128: layout_cx.layout_of(tcx.types.u128)?,
315 usize: layout_cx.layout_of(tcx.types.usize)?,
316 bool: layout_cx.layout_of(tcx.types.bool)?,
317 mut_raw_ptr: layout_cx.layout_of(mut_raw_ptr)?,
318 const_raw_ptr: layout_cx.layout_of(const_raw_ptr)?,
322 pub fn uint(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
325 16 => Some(self.u16),
326 32 => Some(self.u32),
327 64 => Some(self.u64),
328 128 => Some(self.u128),
333 pub fn int(&self, size: Size) -> Option<TyAndLayout<'tcx>> {
336 16 => Some(self.i16),
337 32 => Some(self.i32),
338 64 => Some(self.i64),
339 128 => Some(self.i128),
345 /// The machine itself.
347 /// If you add anything here that stores machine values, remember to update
348 /// `visit_all_machine_values`!
349 pub struct MiriMachine<'mir, 'tcx> {
350 // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
351 pub tcx: TyCtxt<'tcx>,
353 /// Stacked Borrows global data.
354 pub stacked_borrows: Option<stacked_borrows::GlobalState>,
356 /// Data race detector global data.
357 pub data_race: Option<data_race::GlobalState>,
359 /// Ptr-int-cast module global data.
360 pub intptrcast: intptrcast::GlobalState,
362 /// Environment variables set by `setenv`.
363 /// Miri does not expose env vars from the host to the emulated program.
364 pub(crate) env_vars: EnvVars<'tcx>,
366 /// Return place of the main function.
367 pub(crate) main_fn_ret_place: Option<MemPlace<Provenance>>,
369 /// Program arguments (`Option` because we can only initialize them after creating the ecx).
370 /// These are *pointers* to argc/argv because macOS.
371 /// We also need the full command line as one string because of Windows.
372 pub(crate) argc: Option<MemPlace<Provenance>>,
373 pub(crate) argv: Option<MemPlace<Provenance>>,
374 pub(crate) cmd_line: Option<MemPlace<Provenance>>,
377 pub(crate) tls: TlsData<'tcx>,
379 /// What should Miri do when an op requires communicating with the host,
380 /// such as accessing host env vars, random number generation, and
381 /// file system access.
382 pub(crate) isolated_op: IsolatedOp,
384 /// Whether to enforce the validity invariant.
385 pub(crate) validate: bool,
387 /// Whether to enforce [ABI](Abi) of function calls.
388 pub(crate) enforce_abi: bool,
390 /// The table of file descriptors.
391 pub(crate) file_handler: shims::unix::FileHandler,
392 /// The table of directory descriptors.
393 pub(crate) dir_handler: shims::unix::DirHandler,
395 /// This machine's monotone clock.
396 pub(crate) clock: Clock,
398 /// The set of threads.
399 pub(crate) threads: ThreadManager<'mir, 'tcx>,
401 /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri.
402 pub(crate) layouts: PrimitiveLayouts<'tcx>,
404 /// Allocations that are considered roots of static memory (that may leak).
405 pub(crate) static_roots: Vec<AllocId>,
407 /// The `measureme` profiler used to record timing information about
408 /// the emulated program.
409 profiler: Option<measureme::Profiler>,
410 /// Used with `profiler` to cache the `StringId`s for event names
411 /// used with `measureme`.
412 string_cache: FxHashMap<String, measureme::StringId>,
414 /// Cache of `Instance` exported under the given `Symbol` name.
415 /// `None` means no `Instance` exported under the given name is found.
416 pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,
418 /// Whether to raise a panic in the context of the evaluated process when unsupported
419 /// functionality is encountered. If `false`, an error is propagated in the Miri application context
420 /// instead (default behavior)
421 pub(crate) panic_on_unsupported: bool,
423 /// Equivalent setting as RUST_BACKTRACE on encountering an error.
424 pub(crate) backtrace_style: BacktraceStyle,
426 /// Crates which are considered local for the purposes of error reporting.
427 pub(crate) local_crates: Vec<CrateNum>,
429 /// Mapping extern static names to their base pointer.
430 extern_statics: FxHashMap<Symbol, Pointer<Provenance>>,
432 /// The random number generator used for resolving non-determinism.
433 /// Needs to be queried by ptr_to_int, hence needs interior mutability.
434 pub(crate) rng: RefCell<StdRng>,
436 /// The allocation IDs to report when they are being allocated
437 /// (helps for debugging memory leaks and use after free bugs).
438 tracked_alloc_ids: FxHashSet<AllocId>,
440 /// Controls whether alignment of memory accesses is being checked.
441 pub(crate) check_alignment: AlignmentCheck,
443 /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
444 pub(crate) cmpxchg_weak_failure_rate: f64,
446 /// Corresponds to -Zmiri-mute-stdout-stderr and doesn't write the output but acts as if it succeeded.
447 pub(crate) mute_stdout_stderr: bool,
449 /// Whether weak memory emulation is enabled
450 pub(crate) weak_memory: bool,
452 /// The probability of the active thread being preempted at the end of each basic block.
453 pub(crate) preemption_rate: f64,
455 /// If `Some`, we will report the current stack every N basic blocks.
456 pub(crate) report_progress: Option<u32>,
457 /// The total number of blocks that have been executed.
458 pub(crate) basic_block_count: u64,
460 /// Handle of the optional shared object file for external functions.
461 #[cfg(target_os = "linux")]
462 pub external_so_lib: Option<(libloading::Library, std::path::PathBuf)>,
463 #[cfg(not(target_os = "linux"))]
464 pub external_so_lib: Option<!>,
466 /// Run a garbage collector for SbTags every N basic blocks.
467 pub(crate) gc_interval: u32,
468 /// The number of blocks that passed since the last SbTag GC pass.
469 pub(crate) since_gc: u32,
470 /// The number of CPUs to be reported by miri.
471 pub(crate) num_cpus: u32,
474 impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
475 pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Self {
476 let local_crates = helpers::get_local_crates(layout_cx.tcx);
478 PrimitiveLayouts::new(layout_cx).expect("Couldn't get layouts of primitive types");
479 let profiler = config.measureme_out.as_ref().map(|out| {
480 measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
482 let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
483 let stacked_borrows = config.stacked_borrows.then(|| {
484 RefCell::new(stacked_borrows::GlobalStateInner::new(
485 config.tracked_pointer_tags.clone(),
486 config.tracked_call_ids.clone(),
490 let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
495 intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)),
496 // `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
497 env_vars: EnvVars::default(),
498 main_fn_ret_place: None,
502 tls: TlsData::default(),
503 isolated_op: config.isolated_op,
504 validate: config.validate,
505 enforce_abi: config.check_abi,
506 file_handler: FileHandler::new(config.mute_stdout_stderr),
507 dir_handler: Default::default(),
509 threads: ThreadManager::default(),
510 static_roots: Vec::new(),
512 string_cache: Default::default(),
513 exported_symbols_cache: FxHashMap::default(),
514 panic_on_unsupported: config.panic_on_unsupported,
515 backtrace_style: config.backtrace_style,
517 extern_statics: FxHashMap::default(),
518 rng: RefCell::new(rng),
519 tracked_alloc_ids: config.tracked_alloc_ids.clone(),
520 check_alignment: config.check_alignment,
521 cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
522 mute_stdout_stderr: config.mute_stdout_stderr,
523 weak_memory: config.weak_memory_emulation,
524 preemption_rate: config.preemption_rate,
525 report_progress: config.report_progress,
526 basic_block_count: 0,
527 clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
528 #[cfg(target_os = "linux")]
529 external_so_lib: config.external_so_file.as_ref().map(|lib_file_path| {
530 let target_triple = layout_cx.tcx.sess.opts.target_triple.triple();
531 // Check if host target == the session target.
532 if env!("TARGET") != target_triple {
534 "calling external C functions in linked .so file requires host and target to be the same: host={}, target={}",
539 // Note: it is the user's responsibility to provide a correct SO file.
540 // WATCH OUT: If an invalid/incorrect SO file is specified, this can cause
541 // undefined behaviour in Miri itself!
544 libloading::Library::new(lib_file_path)
545 .expect("failed to read specified extern shared object file")
547 lib_file_path.clone(),
550 #[cfg(not(target_os = "linux"))]
551 external_so_lib: config.external_so_file.as_ref().map(|_| {
552 panic!("loading external .so files is only supported on Linux")
554 gc_interval: config.gc_interval,
556 num_cpus: config.num_cpus,
560 pub(crate) fn late_init(
561 this: &mut MiriInterpCx<'mir, 'tcx>,
563 on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
564 ) -> InterpResult<'tcx> {
565 EnvVars::init(this, config)?;
566 MiriMachine::init_extern_statics(this)?;
567 ThreadManager::init(this, on_main_stack_empty);
571 fn add_extern_static(
572 this: &mut MiriInterpCx<'mir, 'tcx>,
574 ptr: Pointer<Option<Provenance>>,
576 // This got just allocated, so there definitely is a pointer here.
577 let ptr = ptr.into_pointer_or_addr().unwrap();
578 this.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
581 fn alloc_extern_static(
582 this: &mut MiriInterpCx<'mir, 'tcx>,
584 val: ImmTy<'tcx, Provenance>,
585 ) -> InterpResult<'tcx> {
586 let place = this.allocate(val.layout, MiriMemoryKind::ExternStatic.into())?;
587 this.write_immediate(*val, &place.into())?;
588 Self::add_extern_static(this, name, place.ptr);
592 /// Sets up the "extern statics" for this machine.
593 fn init_extern_statics(this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
594 match this.tcx.sess.target.os.as_ref() {
597 Self::add_extern_static(
600 this.machine.env_vars.environ.unwrap().ptr,
602 // A couple zero-initialized pointer-sized extern statics.
603 // Most of them are for weak symbols, which we all set to null (indicating that the
604 // symbol is not supported, and triggering fallback code which ends up calling a
605 // syscall that we do support).
606 for name in &["__cxa_thread_atexit_impl", "getrandom", "statx", "__clock_gettime64"]
608 let val = ImmTy::from_int(0, this.machine.layouts.usize);
609 Self::alloc_extern_static(this, name, val)?;
614 Self::add_extern_static(
617 this.machine.env_vars.environ.unwrap().ptr,
622 let layout = this.machine.layouts.const_raw_ptr;
623 let dlsym = Dlsym::from_str("signal".as_bytes(), &this.tcx.sess.target.os)?
624 .expect("`signal` must be an actual dlsym on android");
625 let ptr = this.create_fn_alloc_ptr(FnVal::Other(dlsym));
626 let val = ImmTy::from_scalar(Scalar::from_pointer(ptr, this), layout);
627 Self::alloc_extern_static(this, "signal", val)?;
628 // A couple zero-initialized pointer-sized extern statics.
629 // Most of them are for weak symbols, which we all set to null (indicating that the
630 // symbol is not supported, and triggering fallback code.)
631 for name in &["bsd_signal"] {
632 let val = ImmTy::from_int(0, this.machine.layouts.usize);
633 Self::alloc_extern_static(this, name, val)?;
638 // This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
639 let val = ImmTy::from_int(0, this.machine.layouts.u8);
640 Self::alloc_extern_static(this, "_tls_used", val)?;
642 _ => {} // No "extern statics" supported on this target
/// Whether ops that communicate with the host (env vars, random numbers,
/// file system access -- see `isolated_op`) are currently allowed,
/// i.e. whether isolation is disabled.
647 pub(crate) fn communicate(&self) -> bool {
648 self.isolated_op == IsolatedOp::Allow
651 /// Check whether the stack frame that this `FrameInfo` refers to is part of a local crate.
652 pub(crate) fn is_local(&self, frame: &FrameInfo<'_>) -> bool {
653 let def_id = frame.instance.def_id();
// Besides the crate currently being compiled, crates listed in `local_crates`
// also count as local (used for error reporting -- see the field docs).
654 def_id.is_local() || self.local_crates.contains(&def_id.krate)
658 impl VisitTags for MiriMachine<'_, '_> {
659 fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
684 exported_symbols_cache: _,
685 panic_on_unsupported: _,
689 tracked_alloc_ids: _,
691 cmpxchg_weak_failure_rate: _,
692 mute_stdout_stderr: _,
696 basic_block_count: _,
703 threads.visit_tags(visit);
704 tls.visit_tags(visit);
705 env_vars.visit_tags(visit);
706 dir_handler.visit_tags(visit);
707 file_handler.visit_tags(visit);
708 data_race.visit_tags(visit);
709 stacked_borrows.visit_tags(visit);
710 intptrcast.visit_tags(visit);
711 main_fn_ret_place.visit_tags(visit);
712 argc.visit_tags(visit);
713 argv.visit_tags(visit);
714 cmd_line.visit_tags(visit);
715 for ptr in extern_statics.values() {
716 ptr.visit_tags(visit);
721 /// A rustc InterpCx for Miri.
722 pub type MiriInterpCx<'mir, 'tcx> = InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>;
724 /// A little trait that's useful to be inherited by extension traits.
725 pub trait MiriInterpCxExt<'mir, 'tcx> {
726 fn eval_context_ref<'a>(&'a self) -> &'a MiriInterpCx<'mir, 'tcx>;
727 fn eval_context_mut<'a>(&'a mut self) -> &'a mut MiriInterpCx<'mir, 'tcx>;
729 impl<'mir, 'tcx> MiriInterpCxExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {
731 fn eval_context_ref(&self) -> &MiriInterpCx<'mir, 'tcx> {
735 fn eval_context_mut(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> {
740 /// Machine hook implementations.
741 impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
742 type MemoryKind = MiriMemoryKind;
743 type ExtraFnVal = Dlsym;
745 type FrameExtra = FrameData<'tcx>;
746 type AllocExtra = AllocExtra;
748 type Provenance = Provenance;
749 type ProvenanceExtra = ProvenanceExtra;
751 type MemoryMap = MonoHashMap<
753 (MemoryKind<MiriMemoryKind>, Allocation<Provenance, Self::AllocExtra>),
756 const GLOBAL_KIND: Option<MiriMemoryKind> = Some(MiriMemoryKind::Global);
758 const PANIC_ON_ALLOC_FAIL: bool = false;
761 fn enforce_alignment(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
762 ecx.machine.check_alignment != AlignmentCheck::None
766 fn use_addr_for_alignment_check(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
767 ecx.machine.check_alignment == AlignmentCheck::Int
771 fn enforce_validity(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
776 fn enforce_abi(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
777 ecx.machine.enforce_abi
781 fn checked_binop_checks_overflow(ecx: &MiriInterpCx<'mir, 'tcx>) -> bool {
782 ecx.tcx.sess.overflow_checks()
786 fn find_mir_or_eval_fn(
787 ecx: &mut MiriInterpCx<'mir, 'tcx>,
788 instance: ty::Instance<'tcx>,
790 args: &[OpTy<'tcx, Provenance>],
791 dest: &PlaceTy<'tcx, Provenance>,
792 ret: Option<mir::BasicBlock>,
793 unwind: StackPopUnwind,
794 ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
795 ecx.find_mir_or_eval_fn(instance, abi, args, dest, ret, unwind)
800 ecx: &mut MiriInterpCx<'mir, 'tcx>,
803 args: &[OpTy<'tcx, Provenance>],
804 dest: &PlaceTy<'tcx, Provenance>,
805 ret: Option<mir::BasicBlock>,
806 _unwind: StackPopUnwind,
807 ) -> InterpResult<'tcx> {
808 ecx.call_dlsym(fn_val, abi, args, dest, ret)
813 ecx: &mut MiriInterpCx<'mir, 'tcx>,
814 instance: ty::Instance<'tcx>,
815 args: &[OpTy<'tcx, Provenance>],
816 dest: &PlaceTy<'tcx, Provenance>,
817 ret: Option<mir::BasicBlock>,
818 unwind: StackPopUnwind,
819 ) -> InterpResult<'tcx> {
820 ecx.call_intrinsic(instance, args, dest, ret, unwind)
825 ecx: &mut MiriInterpCx<'mir, 'tcx>,
826 msg: &mir::AssertMessage<'tcx>,
827 unwind: Option<mir::BasicBlock>,
828 ) -> InterpResult<'tcx> {
829 ecx.assert_panic(msg, unwind)
833 fn abort(_ecx: &mut MiriInterpCx<'mir, 'tcx>, msg: String) -> InterpResult<'tcx, !> {
834 throw_machine_stop!(TerminationInfo::Abort(msg))
839 ecx: &MiriInterpCx<'mir, 'tcx>,
841 left: &ImmTy<'tcx, Provenance>,
842 right: &ImmTy<'tcx, Provenance>,
843 ) -> InterpResult<'tcx, (Scalar<Provenance>, bool, Ty<'tcx>)> {
844 ecx.binary_ptr_op(bin_op, left, right)
847 fn thread_local_static_base_pointer(
848 ecx: &mut MiriInterpCx<'mir, 'tcx>,
850 ) -> InterpResult<'tcx, Pointer<Provenance>> {
851 ecx.get_or_create_thread_local_alloc(def_id)
854 fn extern_static_base_pointer(
855 ecx: &MiriInterpCx<'mir, 'tcx>,
857 ) -> InterpResult<'tcx, Pointer<Provenance>> {
858 let link_name = ecx.item_link_name(def_id);
859 if let Some(&ptr) = ecx.machine.extern_statics.get(&link_name) {
860 // Various parts of the engine rely on `get_alloc_info` for size and alignment
861 // information. That uses the type information of this static.
862 // Make sure it matches the Miri allocation for this.
863 let Provenance::Concrete { alloc_id, .. } = ptr.provenance else {
864 panic!("extern_statics cannot contain wildcards")
866 let (shim_size, shim_align, _kind) = ecx.get_alloc_info(alloc_id);
867 let extern_decl_layout =
868 ecx.tcx.layout_of(ty::ParamEnv::empty().and(ecx.tcx.type_of(def_id))).unwrap();
869 if extern_decl_layout.size != shim_size || extern_decl_layout.align.abi != shim_align {
871 "`extern` static `{name}` from crate `{krate}` has been declared \
872 with a size of {decl_size} bytes and alignment of {decl_align} bytes, \
873 but Miri emulates it via an extern static shim \
874 with a size of {shim_size} bytes and alignment of {shim_align} bytes",
875 name = ecx.tcx.def_path_str(def_id),
876 krate = ecx.tcx.crate_name(def_id.krate),
877 decl_size = extern_decl_layout.size.bytes(),
878 decl_align = extern_decl_layout.align.abi.bytes(),
879 shim_size = shim_size.bytes(),
880 shim_align = shim_align.bytes(),
886 "`extern` static `{name}` from crate `{krate}` is not supported by Miri",
887 name = ecx.tcx.def_path_str(def_id),
888 krate = ecx.tcx.crate_name(def_id.krate),
893 fn adjust_allocation<'b>(
894 ecx: &MiriInterpCx<'mir, 'tcx>,
896 alloc: Cow<'b, Allocation>,
897 kind: Option<MemoryKind<Self::MemoryKind>>,
898 ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>> {
899 let kind = kind.expect("we set our STATIC_KIND so this cannot be None");
900 if ecx.machine.tracked_alloc_ids.contains(&id) {
901 ecx.emit_diagnostic(NonHaltingDiagnostic::CreatedAlloc(
909 let alloc = alloc.into_owned();
910 let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
911 stacked_borrows::Stacks::new_allocation(
919 let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
920 data_race::AllocExtra::new_allocation(
922 &ecx.machine.threads,
927 let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocExtra::new_allocation);
928 let alloc: Allocation<Provenance, Self::AllocExtra> = alloc.adjust_from_tcx(
931 stacked_borrows: stacks.map(RefCell::new),
932 data_race: race_alloc,
933 weak_memory: buffer_alloc,
935 |ptr| ecx.global_base_pointer(ptr),
937 Ok(Cow::Owned(alloc))
940 fn adjust_alloc_base_pointer(
941 ecx: &MiriInterpCx<'mir, 'tcx>,
942 ptr: Pointer<AllocId>,
943 ) -> Pointer<Provenance> {
944 if cfg!(debug_assertions) {
945 // The machine promises to never call us on thread-local or extern statics.
946 let alloc_id = ptr.provenance;
947 match ecx.tcx.try_get_global_alloc(alloc_id) {
948 Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_thread_local_static(def_id) => {
949 panic!("adjust_alloc_base_pointer called on thread-local static")
951 Some(GlobalAlloc::Static(def_id)) if ecx.tcx.is_foreign_item(def_id) => {
952 panic!("adjust_alloc_base_pointer called on extern static")
957 let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr);
958 let sb_tag = if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
959 stacked_borrows.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine)
961 // Value does not matter, SB is disabled
965 Provenance::Concrete { alloc_id: ptr.provenance, sb: sb_tag },
966 Size::from_bytes(absolute_addr),
971 fn ptr_from_addr_cast(
972 ecx: &MiriInterpCx<'mir, 'tcx>,
974 ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>> {
975 intptrcast::GlobalStateInner::ptr_from_addr_cast(ecx, addr)
979 ecx: &mut InterpCx<'mir, 'tcx, Self>,
980 ptr: Pointer<Self::Provenance>,
981 ) -> InterpResult<'tcx> {
982 match ptr.provenance {
983 Provenance::Concrete { alloc_id, sb } =>
984 intptrcast::GlobalStateInner::expose_ptr(ecx, alloc_id, sb),
985 Provenance::Wildcard => {
986 // No need to do anything for wildcard pointers as
987 // their provenances have already been previously exposed.
993 /// Convert a pointer with provenance into an allocation-offset pair,
994 /// or a `None` with an absolute address if that conversion is not possible.
996 ecx: &MiriInterpCx<'mir, 'tcx>,
997 ptr: Pointer<Self::Provenance>,
998 ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
999 let rel = intptrcast::GlobalStateInner::abs_ptr_to_rel(ecx, ptr);
1001 rel.map(|(alloc_id, size)| {
1002 let sb = match ptr.provenance {
1003 Provenance::Concrete { sb, .. } => ProvenanceExtra::Concrete(sb),
1004 Provenance::Wildcard => ProvenanceExtra::Wildcard,
1006 (alloc_id, size, sb)
1011 fn before_memory_read(
1014 alloc_extra: &AllocExtra,
1015 (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1017 ) -> InterpResult<'tcx> {
1018 if let Some(data_race) = &alloc_extra.data_race {
1019 data_race.read(alloc_id, range, machine)?;
1021 if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
1024 .before_memory_read(alloc_id, prov_extra, range, machine)?;
1026 if let Some(weak_memory) = &alloc_extra.weak_memory {
1027 weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
1033 fn before_memory_write(
1036 alloc_extra: &mut AllocExtra,
1037 (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
1039 ) -> InterpResult<'tcx> {
1040 if let Some(data_race) = &mut alloc_extra.data_race {
1041 data_race.write(alloc_id, range, machine)?;
1043 if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
1044 stacked_borrows.get_mut().before_memory_write(alloc_id, prov_extra, range, machine)?;
1046 if let Some(weak_memory) = &alloc_extra.weak_memory {
1047 weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
1053 fn before_memory_deallocation(
1056 alloc_extra: &mut AllocExtra,
1057 (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
1059 ) -> InterpResult<'tcx> {
1060 if machine.tracked_alloc_ids.contains(&alloc_id) {
1061 machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
1063 if let Some(data_race) = &mut alloc_extra.data_race {
1064 data_race.deallocate(alloc_id, range, machine)?;
1066 if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
1067 stacked_borrows.get_mut().before_memory_deallocation(
// NOTE(review): the `fn retag(` line itself falls just outside this excerpt;
// these are its parameters and body.
1080 ecx: &mut InterpCx<'mir, 'tcx, Self>,
1081 kind: mir::RetagKind,
1082 place: &PlaceTy<'tcx, Provenance>,
1083 ) -> InterpResult<'tcx> {
// Retagging only matters for Stacked Borrows; it is a no-op when SB is disabled.
1084 if ecx.machine.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
/// Builds the Miri-specific `FrameData` for a freshly pushed stack frame:
/// optional `measureme` timing, per-frame Stacked Borrows state, and the
/// cached user-relevance bit (see `FrameData`).
// NOTE(review): several interior lines (e.g. further arguments to the
// profiler call and additional `FrameData` fields) are missing from this
// excerpt.
1088 fn init_frame_extra(
1089 ecx: &mut InterpCx<'mir, 'tcx, Self>,
1090 frame: Frame<'mir, 'tcx, Provenance>,
1091 ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
1092 // Start recording our event before doing anything else
1093 let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
// Intern the function name in `string_cache` so repeated calls to the same
// function reuse a single measureme string allocation.
1094 let fn_name = frame.instance.to_string();
1095 let entry = ecx.machine.string_cache.entry(fn_name.clone());
1096 let name = entry.or_insert_with(|| profiler.alloc_string(&*fn_name));
// The detached timing is stored in `FrameData::timing` and finished when
// the frame is popped.
1098 Some(profiler.start_recording_interval_event_detached(
1100 measureme::EventId::from_label(*name),
1101 ecx.get_active_thread().to_u32(),
1107 let stacked_borrows = ecx.machine.stacked_borrows.as_ref();
1109 let extra = FrameData {
// Per-frame Stacked Borrows bookkeeping, created only if SB is enabled.
1110 stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)),
// Computed once here and cached; see `FrameData::is_user_relevant`.
1113 is_user_relevant: ecx.machine.is_user_relevant(&frame),
1116 Ok(frame.with_extra(extra))
// NOTE(review): the `fn` line is outside this excerpt; by its return type
// this is the `stack` accessor returning the call stack of the currently
// active thread (borrowed).
1120 ecx: &'a InterpCx<'mir, 'tcx, Self>,
1121 ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
1122 ecx.active_thread_stack()
// NOTE(review): the `fn` line is outside this excerpt; by its return type
// this is the `stack_mut` accessor returning the active thread's call stack
// mutably.
1126 ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
1127 ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
1128 ecx.active_thread_stack_mut()
/// Hook executed before every MIR terminator. Central place for periodic
/// bookkeeping: progress reports, the Stacked Borrows tag GC, thread
/// preemption, and advancing the monotonic clock.
1131 fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
1132 ecx.machine.basic_block_count += 1u64; // a u64 that is only incremented by 1 will "never" overflow
1133 ecx.machine.since_gc += 1;
1134 // Possibly report our progress.
1135 if let Some(report_progress) = ecx.machine.report_progress {
// Report every `report_progress` basic blocks.
1136 if ecx.machine.basic_block_count % u64::from(report_progress) == 0 {
1137 ecx.emit_diagnostic(NonHaltingDiagnostic::ProgressReport {
1138 block_count: ecx.machine.basic_block_count,
1143 // Search for SbTags to find all live pointers, then remove all other tags from borrow
1145 // When debug assertions are enabled, run the GC as often as possible so that any cases
1146 // where it mistakenly removes an important tag become visible.
// A `gc_interval` of 0 disables the tag GC entirely.
1147 if ecx.machine.gc_interval > 0 && ecx.machine.since_gc >= ecx.machine.gc_interval {
1148 ecx.machine.since_gc = 0;
1149 ecx.garbage_collect_tags()?;
1152 // These are our preemption points.
1153 ecx.maybe_preempt_active_thread();
1155 // Make sure some time passes.
1156 ecx.machine.clock.tick();
/// Hook run right after a new frame has been pushed: maintains the cached
/// "topmost user-relevant frame" index and retags the return place.
1162 fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
1163 if ecx.frame().extra.is_user_relevant {
1164 // We just pushed a local frame, so we know that the topmost local frame is the topmost
1165 // frame. If we push a non-local frame, there's no need to do anything.
1166 let stack_len = ecx.active_thread_stack().len();
1167 ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
// Retag the return place for Stacked Borrows; a no-op when SB is disabled.
1170 if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
// NOTE(review): the `fn after_stack_pop(` line (and the `unwinding` parameter
// used below) falls outside this excerpt, and the method continues past the
// end of it.
1175 ecx: &mut InterpCx<'mir, 'tcx, Self>,
1176 mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>,
1178 ) -> InterpResult<'tcx, StackPopJump> {
1179 if frame.extra.is_user_relevant {
1180 // All that we store is whether or not the frame we just removed is local, so now we
1181 // have no idea where the next topmost local frame is. So we recompute it.
1182 // (If this ever becomes a bottleneck, we could have `push` store the previous
1183 // user-relevant frame and restore that here.)
1184 ecx.active_thread_mut().recompute_top_user_relevant_frame();
// Move the measureme timing out of `frame.extra` before `frame.extra` is
// consumed by `handle_stack_pop_unwind` below.
1186 let timing = frame.extra.timing.take();
1187 if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
// Let Stacked Borrows close out this call's per-frame state.
1188 stacked_borrows.borrow_mut().end_call(&frame.extra);
1190 let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
// Finish the profiling interval opened in `init_frame_extra`.
// NOTE(review): `timing.unwrap()` assumes that whenever the profiler is
// enabled, `init_frame_extra` stored a timing in this frame — confirm.
1191 if let Some(profiler) = ecx.machine.profiler.as_ref() {
1192 profiler.finish_recording_interval_event(timing.unwrap());