use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::num::TryFromIntError;
+use std::task::Poll;
use std::time::{Duration, SystemTime};
use log::trace;
use crate::concurrency::data_race;
use crate::concurrency::sync::SynchronizationState;
+use crate::shims::tls;
use crate::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SchedulingAction {
/// Execute a step of the active thread.
ExecuteStep,
/// Execute a timeout callback.
ExecuteTimeoutCallback,
- /// Execute destructors of the active thread.
- ExecuteDtors,
- /// Stop the program.
- Stop,
+ /// Wait for a bit, until a timeout callback is ready to be called.
+ Sleep(Duration),
}
/// Trait for callbacks that can be executed when some event happens, such as after a timeout.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);
-/// The main thread. When it terminates, the whole application terminates.
-const MAIN_THREAD: ThreadId = ThreadId(0);
-
impl ThreadId {
pub fn to_u32(self) -> u32 {
self.0
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
+ /// The function to call when the stack runs empty, to figure out what to do next.
+ /// Conceptually, this is the interpreter implementation of the things that happen 'after' the
+ /// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
+ /// (`None` is an error; it means the callback has not been set up yet or is currently running.)
+ pub(crate) on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>,
+
/// The index of the topmost user-relevant frame in `stack`. This field must contain
/// the value produced by `get_top_user_relevant_frame`.
/// The `None` state here represents
pub(crate) last_error: Option<MPlaceTy<'tcx, Provenance>>,
}
-impl<'mir, 'tcx> Thread<'mir, 'tcx> {
- /// Check if the thread is done executing (no more stack frames). If yes,
- /// change the state to terminated and return `true`.
- fn check_terminated(&mut self) -> bool {
- if self.state == ThreadState::Enabled {
- if self.stack.is_empty() {
- self.state = ThreadState::Terminated;
- return true;
- }
- }
- false
- }
+pub type StackEmptyCallback<'mir, 'tcx> =
+ Box<dyn FnMut(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx, Poll<()>>>;
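+// A callback is typically a small state machine that is polled each time the stack
+// runs empty, e.g. (mirroring how `start_regular_thread` builds one below):
+//
+//     let mut state = tls::TlsDtorsState::default();
+//     let callback: StackEmptyCallback<'mir, 'tcx> = Box::new(move |m| state.on_stack_empty(m));
+//
+// `Poll::Pending` means "more work was pushed onto the stack, keep executing";
+// `Poll::Ready(())` means the thread is done and can be terminated.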
+impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Get the name of the current thread, or `<unnamed>` if it was not set.
fn thread_name(&self) -> &[u8] {
if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
}
}
-impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
- fn default() -> Self {
+impl<'mir, 'tcx> Thread<'mir, 'tcx> {
+ fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>) -> Self {
Self {
state: ThreadState::Enabled,
- thread_name: None,
+ thread_name: name.map(|name| Vec::from(name.as_bytes())),
stack: Vec::new(),
top_user_relevant_frame: None,
join_status: ThreadJoinStatus::Joinable,
panic_payload: None,
last_error: None,
+ on_stack_empty,
}
}
}
-impl<'mir, 'tcx> Thread<'mir, 'tcx> {
- fn new(name: &str) -> Self {
- let mut thread = Thread::default();
- thread.thread_name = Some(Vec::from(name.as_bytes()));
- thread
- }
-}
-
impl VisitTags for Thread<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let Thread {
state: _,
thread_name: _,
join_status: _,
+ on_stack_empty: _, // we assume the closure captures no GC-relevant state
} = self;
panic_payload.visit_tags(visit);
timeout_callbacks: FxHashMap<ThreadId, TimeoutCallbackInfo<'mir, 'tcx>>,
}
-impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
- fn default() -> Self {
- let mut threads = IndexVec::new();
- // Create the main thread and add it to the list of threads.
- threads.push(Thread::new("main"));
- Self {
- active_thread: ThreadId::new(0),
- threads,
- sync: SynchronizationState::default(),
- thread_local_alloc_ids: Default::default(),
- yield_active_thread: false,
- timeout_callbacks: FxHashMap::default(),
- }
- }
-}
-
impl VisitTags for ThreadManager<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let ThreadManager {
}
}
+impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
+ fn default() -> Self {
+ let mut threads = IndexVec::new();
+ // Create the main thread and add it to the list of threads.
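+ // Its `on_stack_empty` is set later, in `ThreadManager::init`.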
+ threads.push(Thread::new(Some("main"), None));
+ Self {
+ active_thread: ThreadId::new(0),
+ threads,
+ sync: SynchronizationState::default(),
+ thread_local_alloc_ids: Default::default(),
+ yield_active_thread: false,
+ timeout_callbacks: FxHashMap::default(),
+ }
+ }
+}
+
impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
- pub(crate) fn init(ecx: &mut MiriInterpCx<'mir, 'tcx>) {
+ pub(crate) fn init(
+ ecx: &mut MiriInterpCx<'mir, 'tcx>,
+ on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
+ ) {
+ ecx.machine.threads.threads[ThreadId::new(0)].on_stack_empty = Some(on_main_stack_empty);
if ecx.tcx.sess.target.os.as_ref() != "windows" {
// The main thread can *not* be joined on, except on Windows.
ecx.machine.threads.threads[ThreadId::new(0)].join_status = ThreadJoinStatus::Detached;
}
/// Create a new thread and return its ID.
- fn create_thread(&mut self) -> ThreadId {
+ fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'mir, 'tcx>) -> ThreadId {
let new_thread_id = ThreadId::new(self.threads.len());
- self.threads.push(Default::default());
+ self.threads.push(Thread::new(None, Some(on_stack_empty)));
new_thread_id
}
}
/// Get a mutable borrow of the currently active thread.
+ /// (Private for a bit of protection.)
fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
&mut self.threads[self.active_thread]
}
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(&mut self, clock: &Clock) -> InterpResult<'tcx, SchedulingAction> {
- // Check whether the thread has **just** terminated (`check_terminated`
- // checks whether the thread has popped all its stack and if yes, sets
- // the thread state to terminated).
- if self.threads[self.active_thread].check_terminated() {
- return Ok(SchedulingAction::ExecuteDtors);
- }
- // If we get here again and the thread is *still* terminated, there are no more dtors to run.
- if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
- // The main thread terminated; stop the program.
- // We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior.
- return Ok(SchedulingAction::Stop);
- }
// This thread and the program can keep going.
if self.threads[self.active_thread].state == ThreadState::Enabled
&& !self.yield_active_thread
// The currently active thread is still enabled, just continue with it.
return Ok(SchedulingAction::ExecuteStep);
}
- // The active thread yielded. Let's see if there are any timeouts to take care of. We do
- // this *before* running any other thread, to ensure that timeouts "in the past" fire before
- // any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
- // error is returned if [...] the absolute time specified by abstime has already been passed
- // at the time of the call".
+ // The active thread yielded or got terminated. Let's see if there are any timeouts to take
+ // care of. We do this *before* running any other thread, to ensure that timeouts "in the
+ // past" fire before any other thread can take an action. This ensures that for
+ // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
+ // abstime has already been passed at the time of the call".
// <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
let potential_sleep_time =
self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time(clock)).min();
if potential_sleep_time == Some(Duration::new(0, 0)) {
return Ok(SchedulingAction::ExecuteTimeoutCallback);
}
- // No callbacks scheduled, pick a regular thread to execute.
+ // No callbacks immediately scheduled, pick a regular thread to execute.
// The active thread blocked or yielded. So we go search for another enabled thread.
// Crucially, we start searching at the current active thread ID, rather than at 0, since we
// want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2.
// All threads are currently blocked, but we have unexecuted
// timeout_callbacks, which may unblock some of the threads. Hence,
// sleep until the first callback.
-
- clock.sleep(sleep_time);
- Ok(SchedulingAction::ExecuteTimeoutCallback)
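+ // The main loop performs the actual sleep when it sees `SchedulingAction::Sleep`.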
+ Ok(SchedulingAction::Sleep(sleep_time))
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
}
}
+ /// Start a regular (non-main) thread.
#[inline]
- fn create_thread(&mut self) -> ThreadId {
- let this = self.eval_context_mut();
- let id = this.machine.threads.create_thread();
- if let Some(data_race) = &mut this.machine.data_race {
- data_race.thread_created(&this.machine.threads, id);
- }
- id
- }
-
- #[inline]
- fn start_thread(
+ fn start_regular_thread(
&mut self,
thread: Option<MPlaceTy<'tcx, Provenance>>,
start_routine: Pointer<Option<Provenance>>,
let this = self.eval_context_mut();
// Create the new thread
- let new_thread_id = this.create_thread();
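+ // Its `on_stack_empty` callback will run the TLS destructors once the thread's
+ // start routine has returned and its stack is empty.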
+ let new_thread_id = this.machine.threads.create_thread({
+ let mut state = tls::TlsDtorsState::default();
+ Box::new(move |m| state.on_stack_empty(m))
+ });
+ if let Some(data_race) = &mut this.machine.data_race {
+ data_race.thread_created(&this.machine.threads, new_thread_id);
+ }
// Write the current thread-id, switch to the next thread later
// to treat this write operation as occurring on the current thread.
this.machine.threads.get_total_thread_count()
}
- #[inline]
- fn has_terminated(&self, thread_id: ThreadId) -> bool {
- let this = self.eval_context_ref();
- this.machine.threads.has_terminated(thread_id)
- }
-
#[inline]
fn have_all_terminated(&self) -> bool {
let this = self.eval_context_ref();
where
'mir: 'c,
{
- let this = self.eval_context_ref();
- this.machine.threads.get_thread_name(thread)
+ self.eval_context_ref().machine.threads.get_thread_name(thread)
}
#[inline]
fn block_thread(&mut self, thread: ThreadId) {
- let this = self.eval_context_mut();
- this.machine.threads.block_thread(thread);
+ self.eval_context_mut().machine.threads.block_thread(thread);
}
#[inline]
fn unblock_thread(&mut self, thread: ThreadId) {
- let this = self.eval_context_mut();
- this.machine.threads.unblock_thread(thread);
+ self.eval_context_mut().machine.threads.unblock_thread(thread);
}
#[inline]
fn yield_active_thread(&mut self) {
- let this = self.eval_context_mut();
- this.machine.threads.yield_active_thread();
+ self.eval_context_mut().machine.threads.yield_active_thread();
}
#[inline]
Ok(())
}
+ #[inline]
+ fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
+ let this = self.eval_context_mut();
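+ // Take the callback out of the thread while it runs, so that the callback
+ // itself can get `&mut` access to the interpreter (including this thread).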
+ let mut callback = this
+ .active_thread_mut()
+ .on_stack_empty
+ .take()
+ .expect("`on_stack_empty` not set up, or already running");
+ let res = callback(this)?;
+ this.active_thread_mut().on_stack_empty = Some(callback);
+ Ok(res)
+ }
+
/// Decide which action to take next and on which thread.
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
/// Handles thread termination of the active thread: wakes up threads joining on this one,
/// and deallocates thread-local statics.
///
- /// This is called from `tls.rs` after handling the TLS dtors.
+ /// This is called by the eval loop when a thread's `on_stack_empty` returns `Ready`.
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
+ let thread = this.active_thread_mut();
+ assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
+ thread.state = ThreadState::Terminated;
+
for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}
/// Details of premature program termination.
pub enum TerminationInfo {
- Exit(i64),
+ Exit {
+ code: i64,
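+ /// Whether a leak check should happen; this is `false` when the program
+ /// calls `exit` directly.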
+ leak_check: bool,
+ },
Abort(String),
UnsupportedInIsolation(String),
StackedBorrowsUb {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use TerminationInfo::*;
match self {
- Exit(code) => write!(f, "the evaluated program completed with exit code {code}"),
+ Exit { code, .. } => write!(f, "the evaluated program completed with exit code {code}"),
Abort(msg) => write!(f, "{msg}"),
UnsupportedInIsolation(msg) => write!(f, "{msg}"),
Int2PtrWithStrictProvenance =>
/// Emit a custom diagnostic without going through the miri-engine machinery.
///
-/// Returns `Some` if this was regular program termination with a given exit code, `None` otherwise.
+/// Returns `Some` with the exit code and a `bool` indicating whether a leak check should happen, if this was regular program termination; `None` otherwise.
pub fn report_error<'tcx, 'mir>(
ecx: &InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>,
e: InterpErrorInfo<'tcx>,
-) -> Option<i64> {
+) -> Option<(i64, bool)> {
use InterpError::*;
let mut msg = vec![];
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
use TerminationInfo::*;
let title = match info {
- Exit(code) => return Some(*code),
+ Exit { code, leak_check } => return Some((*code, *leak_check)),
Abort(_) => Some("abnormal termination"),
UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance =>
Some("unsupported operation"),
use std::iter;
use std::panic::{self, AssertUnwindSafe};
use std::path::PathBuf;
+use std::task::Poll;
use std::thread;
use log::info;
use rustc_session::config::EntryFnType;
+use crate::shims::tls;
use crate::*;
#[derive(Copy, Clone, Debug, PartialEq)]
}
}
-/// Returns a freshly created `InterpCx`, along with an `MPlaceTy` representing
-/// the location where the return value of the `start` function will be
-/// written to.
+/// The state of the main thread. Implementation detail of `on_main_stack_empty`.
+#[derive(Default, Debug)]
+enum MainThreadState {
+ #[default]
+ Running,
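+ /// The main function has returned; now running the TLS destructors.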
+ TlsDtors(tls::TlsDtorsState),
+}
+
+impl MainThreadState {
+ fn on_main_stack_empty<'tcx>(
+ &mut self,
+ this: &mut MiriInterpCx<'_, 'tcx>,
+ ) -> InterpResult<'tcx, Poll<()>> {
+ use MainThreadState::*;
+ match self {
+ Running => {
+ *self = TlsDtors(Default::default());
+ }
+ TlsDtors(state) =>
+ match state.on_stack_empty(this)? {
+ Poll::Pending => {} // just keep going
+ Poll::Ready(()) => {
+ // Need to call `thread_terminated` ourselves since we are not going to
+ // return to the scheduler loop.
+ this.thread_terminated()?;
+ // Raise exception to stop program execution.
+ let ret_place = MPlaceTy::from_aligned_ptr(
+ this.machine.main_fn_ret_place.unwrap().ptr,
+ this.machine.layouts.isize,
+ );
+ let exit_code =
+ this.read_scalar(&ret_place.into())?.to_machine_isize(this)?;
+ throw_machine_stop!(TerminationInfo::Exit {
+ code: exit_code,
+ leak_check: true
+ });
+ }
+ },
+ }
+ Ok(Poll::Pending)
+ }
+}
+
+/// Returns a freshly created `InterpCx`.
/// Public because this is also used by `priroda`.
pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx: TyCtxt<'tcx>,
entry_id: DefId,
entry_type: EntryFnType,
config: &MiriConfig,
-) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>, MPlaceTy<'tcx, Provenance>)>
-{
+) -> InterpResult<'tcx, InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>> {
let param_env = ty::ParamEnv::reveal_all();
let layout_cx = LayoutCx { tcx, param_env };
let mut ecx = InterpCx::new(
);
// Some parts of initialization require a full `InterpCx`.
- MiriMachine::late_init(&mut ecx, config)?;
+ MiriMachine::late_init(&mut ecx, config, {
+ let mut state = MainThreadState::default();
+ // Cannot capture anything GC-relevant here.
+ Box::new(move |m| state.on_main_stack_empty(m))
+ })?;
// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
// Return place (in static memory so that it does not count as leak).
let ret_place = ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?;
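+ // Store it in the machine so that `on_main_stack_empty` can read the exit code
+ // from it once the main function has returned.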
+ ecx.machine.main_fn_ret_place = Some(*ret_place);
// Call start function.
match entry_type {
}
}
- Ok((ecx, ret_place))
+ Ok(ecx)
}
/// Evaluates the entry function specified by `entry_id`.
// Copy setting before we move `config`.
let ignore_leaks = config.ignore_leaks;
- let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) {
+ let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config) {
Ok(v) => v,
Err(err) => {
err.print_backtrace();
};
// Perform the main execution.
- let res: thread::Result<InterpResult<'_, i64>> = panic::catch_unwind(AssertUnwindSafe(|| {
- // Main loop.
+ let res: thread::Result<InterpResult<'_, !>> = panic::catch_unwind(AssertUnwindSafe(|| {
+ // Main loop. Goes on forever until an interrupt is triggered (represented as `InterpError`).
loop {
match ecx.schedule()? {
SchedulingAction::ExecuteStep => {
- assert!(ecx.step()?, "a terminated thread was scheduled for execution");
+ if !ecx.step()? {
+ // See if this thread can do something else.
+ match ecx.run_on_stack_empty()? {
+ Poll::Pending => {} // keep going
+ Poll::Ready(()) => ecx.thread_terminated()?,
+ }
+ }
}
SchedulingAction::ExecuteTimeoutCallback => {
ecx.run_timeout_callback()?;
}
- SchedulingAction::ExecuteDtors => {
- // This will either enable the thread again (so we go back
- // to `ExecuteStep`), or determine that this thread is done
- // for good.
- ecx.schedule_next_tls_dtor_for_active_thread()?;
- }
- SchedulingAction::Stop => {
- break;
+ SchedulingAction::Sleep(duration) => {
+ ecx.machine.clock.sleep(duration);
}
}
}
- let return_code = ecx.read_scalar(&ret_place.into())?.to_machine_isize(&ecx)?;
- Ok(return_code)
}));
let res = res.unwrap_or_else(|panic_payload| {
ecx.handle_ice();
panic::resume_unwind(panic_payload)
});
+ let res = match res {
+ Err(res) => res,
+ // `Ok` can never happen
+ Ok(never) => match never {},
+ };
// Machine cleanup. Only do this if all threads have terminated; threads that are still running
// might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396).
}
// Process the result.
- match res {
- Ok(return_code) => {
- if !ignore_leaks {
- // Check for thread leaks.
- if !ecx.have_all_terminated() {
- tcx.sess.err(
- "the main thread terminated without waiting for all remaining threads",
- );
- tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
- return None;
- }
- // Check for memory leaks.
- info!("Additonal static roots: {:?}", ecx.machine.static_roots);
- let leaks = ecx.leak_report(&ecx.machine.static_roots);
- if leaks != 0 {
- tcx.sess.err("the evaluated program leaked memory");
- tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
- // Ignore the provided return code - let the reported error
- // determine the return code.
- return None;
- }
- }
- Some(return_code)
+ let (return_code, leak_check) = report_error(&ecx, res)?;
+ if leak_check && !ignore_leaks {
+ // Check for thread leaks.
+ if !ecx.have_all_terminated() {
+ tcx.sess.err("the main thread terminated without waiting for all remaining threads");
+ tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
+ return None;
+ }
+ // Check for memory leaks.
+ info!("Additonal static roots: {:?}", ecx.machine.static_roots);
+ let leaks = ecx.leak_report(&ecx.machine.static_roots);
+ if leaks != 0 {
+ tcx.sess.err("the evaluated program leaked memory");
+ tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
+ // Ignore the provided return code - let the reported error
+ // determine the return code.
+ return None;
}
- Err(e) => report_error(&ecx, e),
}
+ Some(return_code)
}
/// Turns an array of arguments into a Windows command line string.
pub use crate::shims::os_str::EvalContextExt as _;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
pub use crate::shims::time::EvalContextExt as _;
-pub use crate::shims::tls::{EvalContextExt as _, TlsData};
+pub use crate::shims::tls::TlsData;
pub use crate::shims::EvalContextExt as _;
pub use crate::clock::{Clock, Instant};
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
- thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time},
+ thread::{
+ EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager,
+ ThreadState, Time,
+ },
};
pub use crate::diagnostics::{
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
/// Miri does not expose env vars from the host to the emulated program.
pub(crate) env_vars: EnvVars<'tcx>,
+ /// Return place of the main function.
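+ /// (`Option` because we can only initialize it after creating the ecx, like `argc`/`argv` below.)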
+ pub(crate) main_fn_ret_place: Option<MemPlace<Provenance>>,
+
/// Program arguments (`Option` because we can only initialize them after creating the ecx).
/// These are *pointers* to argc/argv because macOS.
/// We also need the full command line as one string because of Windows.
intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)),
// `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
env_vars: EnvVars::default(),
+ main_fn_ret_place: None,
argc: None,
argv: None,
cmd_line: None,
pub(crate) fn late_init(
this: &mut MiriInterpCx<'mir, 'tcx>,
config: &MiriConfig,
+ on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
) -> InterpResult<'tcx> {
EnvVars::init(this, config)?;
MiriMachine::init_extern_statics(this)?;
- ThreadManager::init(this);
+ ThreadManager::init(this, on_main_stack_empty);
Ok(())
}
threads,
tls,
env_vars,
+ main_fn_ret_place,
argc,
argv,
cmd_line,
data_race.visit_tags(visit);
stacked_borrows.visit_tags(visit);
intptrcast.visit_tags(visit);
+ main_fn_ret_place.visit_tags(visit);
argc.visit_tags(visit);
argv.visit_tags(visit);
cmd_line.visit_tags(visit);
let [code] = this.check_shim(abi, exp_abi, link_name, args)?;
// it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
let code = this.read_scalar(code)?.to_i32()?;
- throw_machine_stop!(TerminationInfo::Exit(code.into()));
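+ // We skip the leak check for explicit `exit`: the program is terminating
+ // abruptly, so reachable memory may legitimately never be freed.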
+ throw_machine_stop!(TerminationInfo::Exit { code: code.into(), leak_check: false });
}
"abort" => {
let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
//! Implement thread-local storage.
use std::collections::btree_map::Entry as BTreeEntry;
-use std::collections::hash_map::Entry as HashMapEntry;
use std::collections::BTreeMap;
+use std::task::Poll;
use log::trace;
-use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{HasDataLayout, Size};
use rustc_target::spec::abi::Abi;
dtor: Option<ty::Instance<'tcx>>,
}
-#[derive(Clone, Debug)]
-struct RunningDtorsState {
+#[derive(Default, Debug)]
+struct RunningDtorState {
/// The last TlsKey used to retrieve a TLS destructor. `None` means that we
/// have not tried to retrieve a TLS destructor yet or that we already tried
/// all keys.
- last_dtor_key: Option<TlsKey>,
+ last_key: Option<TlsKey>,
}
#[derive(Debug)]
/// A single per-thread destructor of the thread-local storage (that's how
/// things work on macOS) with a data argument.
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Provenance>)>,
-
- /// State for currently running TLS dtors. If this map contains a key for a
- /// specific thread, it means that we are in the "destruct" phase, during
- /// which some operations are UB.
- dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}
impl<'tcx> Default for TlsData<'tcx> {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
macos_thread_dtors: Default::default(),
- dtors_running: Default::default(),
}
}
}
dtor: ty::Instance<'tcx>,
data: Scalar<Provenance>,
) -> InterpResult<'tcx> {
- if self.dtors_running.contains_key(&thread) {
- // UB, according to libstd docs.
- throw_ub_format!(
- "setting thread's local storage destructor while destructors are already running"
- );
- }
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!(
"setting more than one thread local storage destructor for the same thread is not supported"
None
}
- /// Set that dtors are running for `thread`. It is guaranteed not to change
- /// the existing values stored in `dtors_running` for this thread. Returns
- /// `true` if dtors for `thread` are already running.
- fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
- match self.dtors_running.entry(thread) {
- HashMapEntry::Occupied(_) => true,
- HashMapEntry::Vacant(entry) => {
- // We cannot just do `self.dtors_running.insert` because that
- // would overwrite `last_dtor_key` with `None`.
- entry.insert(RunningDtorsState { last_dtor_key: None });
- false
- }
- }
- }
-
/// Delete all TLS entries for the given thread. This function should be
/// called after all TLS destructors have already finished.
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
impl VisitTags for TlsData<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
- let TlsData { keys, macos_thread_dtors, next_key: _, dtors_running: _ } = self;
+ let TlsData { keys, macos_thread_dtors, next_key: _ } = self;
for scalar in keys.values().flat_map(|v| v.data.values()) {
scalar.visit_tags(visit);
}
}
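+/// State machine for the work a thread does when its stack is empty: running the
+/// platform's TLS destructors. `on_stack_empty` is polled each time the stack runs
+/// empty, until it returns `Poll::Ready`.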
+#[derive(Debug, Default)]
+pub struct TlsDtorsState(TlsDtorsStatePriv);
+
+#[derive(Debug, Default)]
+enum TlsDtorsStatePriv {
+ #[default]
+ Init,
+ PthreadDtors(RunningDtorState),
+ Done,
+}
+
+impl TlsDtorsState {
+ pub fn on_stack_empty<'tcx>(
+ &mut self,
+ this: &mut MiriInterpCx<'_, 'tcx>,
+ ) -> InterpResult<'tcx, Poll<()>> {
+ use TlsDtorsStatePriv::*;
+ match &mut self.0 {
+ Init => {
+ match this.tcx.sess.target.os.as_ref() {
+ "linux" => {
+ // Run the pthread dtors.
+ self.0 = PthreadDtors(Default::default());
+ }
+ "macos" => {
+ // The macOS thread-wide destructor runs "before any TLS slots get
+ // freed", so do that first.
+ this.schedule_macos_tls_dtor()?;
+ // When the stack is empty again, go on with the pthread dtors.
+ self.0 = PthreadDtors(Default::default());
+ }
+ "windows" => {
+ // Run the special magic hook.
+ this.schedule_windows_tls_dtors()?;
+ // And move to the final state.
+ self.0 = Done;
+ }
+ _ => {
+ // No TLS support for this platform, directly move to final state.
+ self.0 = Done;
+ }
+ }
+ }
+ PthreadDtors(state) => {
+ match this.schedule_next_pthread_tls_dtor(state)? {
+ Poll::Pending => {} // just keep going
+ Poll::Ready(()) => self.0 = Done,
+ }
+ }
+ Done => {
+ this.machine.tls.delete_all_thread_tls(this.get_active_thread());
+ return Ok(Poll::Ready(()));
+ }
+ }
+
+ Ok(Poll::Pending)
+ }
+}
+
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Schedule TLS destructors for Windows.
/// On Windows, TLS destructors are managed by std.
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let active_thread = this.get_active_thread();
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
None,
StackPopCleanup::Root { cleanup: true },
)?;
-
- this.enable_thread(active_thread);
Ok(())
}
/// Schedule the macOS thread destructor of the thread-local storage to be
- /// executed. Returns `true` if scheduled.
- ///
- /// Note: It is safe to call this function also on other Unixes.
- fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
+ /// executed.
+ fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread_id = this.get_active_thread();
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
None,
StackPopCleanup::Root { cleanup: true },
)?;
-
- // Enable the thread so that it steps through the destructor which
- // we just scheduled. Since we deleted the destructor, it is
- // guaranteed that we will schedule it again. The `dtors_running`
- // flag will prevent the code from adding the destructor again.
- this.enable_thread(thread_id);
- Ok(true)
- } else {
- Ok(false)
}
+ Ok(())
}
/// Schedule a pthread TLS destructor. Returns `Pending` if a destructor
/// was found and scheduled, and `Ready` once all destructors are done.
- fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
+ fn schedule_next_pthread_tls_dtor(
+ &mut self,
+ state: &mut RunningDtorState,
+ ) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
- assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
// Fetch the next dtor after `state.last_key`.
- let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key;
- let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
+ let dtor = match this.machine.tls.fetch_tls_dtor(state.last_key, active_thread) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => this.machine.tls.fetch_tls_dtor(None, active_thread),
};
if let Some((instance, ptr, key)) = dtor {
- this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
- Some(key);
+ state.last_key = Some(key);
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
assert!(
ptr.to_machine_usize(this).unwrap() != 0,
StackPopCleanup::Root { cleanup: true },
)?;
- this.enable_thread(active_thread);
- return Ok(true);
+ return Ok(Poll::Pending);
}
- this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
-
- Ok(false)
- }
-}
-impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
-pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
- /// Schedule an active thread's TLS destructor to run on the active thread.
- /// Note that this function does not run the destructors itself, it just
- /// schedules them one by one each time it is called and reenables the
- /// thread so that it can be executed normally by the main execution loop.
- ///
- /// Note: we consistently run TLS destructors for all threads, including the
- /// main thread. However, it is not clear that we should run the TLS
- /// destructors for the main thread. See issue:
- /// <https://github.com/rust-lang/rust/issues/28129>.
- fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
- let active_thread = this.get_active_thread();
- trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);
-
- if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
- // This is the first time we got asked to schedule a destructor. The
- // Windows schedule destructor function must be called exactly once,
- // this is why it is in this block.
- if this.tcx.sess.target.os == "windows" {
- // On Windows, we signal that the thread quit by starting the
- // relevant function, reenabling the thread, and going back to
- // the scheduler.
- this.schedule_windows_tls_dtors()?;
- return Ok(());
- }
- }
- // The remaining dtors make some progress each time around the scheduler loop,
- // until they return `false` to indicate that they are done.
-
- // The macOS thread wide destructor runs "before any TLS slots get
- // freed", so do that first.
- if this.schedule_macos_tls_dtor()? {
- // We have scheduled a MacOS dtor to run on the thread. Execute it
- // to completion and come back here. Scheduling a destructor
- // destroys it, so we will not enter this branch again.
- return Ok(());
- }
- if this.schedule_next_pthread_tls_dtor()? {
- // We have scheduled a pthread destructor and removed it from the
- // destructors list. Run it to completion and come back here.
- return Ok(());
- }
-
- // All dtors done!
- this.machine.tls.delete_all_thread_tls(active_thread);
- this.thread_terminated()?;
-
- Ok(())
+ Ok(Poll::Ready(()))
}
}
let func_arg = this.read_immediate(arg)?;
- this.start_thread(
+ this.start_regular_thread(
Some(thread_info_place),
start_routine,
Abi::C { unwind: false },
throw_unsup_format!("non-null `lpThreadAttributes` in `CreateThread`")
}
- this.start_thread(
+ this.start_regular_thread(
thread,
start_routine,
Abi::System { unwind: false },