//! Implement thread-local storage.
use std::collections::btree_map::Entry as BTreeEntry;
-use std::collections::hash_map::Entry as HashMapEntry;
use std::collections::BTreeMap;
+use std::task::Poll;
use log::trace;
-use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{HasDataLayout, Size};
use rustc_target::spec::abi::Abi;
dtor: Option<ty::Instance<'tcx>>,
}
-#[derive(Clone, Debug)]
-struct RunningDtorsState {
+#[derive(Default, Debug)]
+struct RunningDtorState {
/// The last TlsKey used to retrieve a TLS destructor. `None` means that we
/// have not tried to retrieve a TLS destructor yet or that we already tried
/// all keys.
- last_dtor_key: Option<TlsKey>,
+ last_key: Option<TlsKey>,
}
#[derive(Debug)]
/// A single per-thread destructor of the thread-local storage (that's how
/// things work on macOS) with a data argument.
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Provenance>)>,
-
- /// State for currently running TLS dtors. If this map contains a key for a
- /// specific thread, it means that we are in the "destruct" phase, during
- /// which some operations are UB.
- dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}
impl<'tcx> Default for TlsData<'tcx> {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
macos_thread_dtors: Default::default(),
- dtors_running: Default::default(),
}
}
}
dtor: ty::Instance<'tcx>,
data: Scalar<Provenance>,
) -> InterpResult<'tcx> {
- if self.dtors_running.contains_key(&thread) {
- // UB, according to libstd docs.
- throw_ub_format!(
- "setting thread's local storage destructor while destructors are already running"
- );
- }
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!(
"setting more than one thread local storage destructor for the same thread is not supported"
None
}
- /// Set that dtors are running for `thread`. It is guaranteed not to change
- /// the existing values stored in `dtors_running` for this thread. Returns
- /// `true` if dtors for `thread` are already running.
- fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
- match self.dtors_running.entry(thread) {
- HashMapEntry::Occupied(_) => true,
- HashMapEntry::Vacant(entry) => {
- // We cannot just do `self.dtors_running.insert` because that
- // would overwrite `last_dtor_key` with `None`.
- entry.insert(RunningDtorsState { last_dtor_key: None });
- false
- }
- }
- }
-
/// Delete all TLS entries for the given thread. This function should be
/// called after all TLS destructors have already finished.
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
}
impl VisitTags for TlsData<'_> {
- fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
- let TlsData { keys, macos_thread_dtors, next_key: _, dtors_running: _ } = self;
+ fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
+ let TlsData { keys, macos_thread_dtors, next_key: _ } = self;
for scalar in keys.values().flat_map(|v| v.data.values()) {
scalar.visit_tags(visit);
}
}
+#[derive(Debug, Default)]
+pub struct TlsDtorsState(TlsDtorsStatePriv);
+
+#[derive(Debug, Default)]
+enum TlsDtorsStatePriv {
+ #[default]
+ Init,
+ PthreadDtors(RunningDtorState),
+ Done,
+}
+
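+// How the state machine advances: `Init` picks a per-OS strategy on the
+// first call; `PthreadDtors` schedules one pthread dtor per call until none
+// are left; `Done` deletes the thread's TLS and reports `Poll::Ready(())`.
+// Every call to `on_stack_empty` pushes at most one frame.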
+impl TlsDtorsState {
+ pub fn on_stack_empty<'tcx>(
+ &mut self,
+ this: &mut MiriInterpCx<'_, 'tcx>,
+ ) -> InterpResult<'tcx, Poll<()>> {
+ use TlsDtorsStatePriv::*;
+ match &mut self.0 {
+ Init => {
+ match this.tcx.sess.target.os.as_ref() {
+ "linux" | "freebsd" | "android" => {
+ // Run the pthread dtors.
+ self.0 = PthreadDtors(Default::default());
+ }
+ "macos" => {
+ // The macOS thread-wide destructor runs "before any TLS slots get
+ // freed", so do that first.
+ this.schedule_macos_tls_dtor()?;
+ // When the stack is empty again, go on with the pthread dtors.
+ self.0 = PthreadDtors(Default::default());
+ }
+ "windows" => {
+ // Run the special magic hook.
+ this.schedule_windows_tls_dtors()?;
+ // And move to the final state.
+ self.0 = Done;
+ }
+ "wasi" | "none" => {
+ // No OS, no TLS dtors.
+ // FIXME: should we do something on wasi?
+ self.0 = Done;
+ }
+ os => {
+ throw_unsup_format!(
+ "the TLS machinery does not know how to handle OS `{os}`"
+ );
+ }
+ }
+ }
+ PthreadDtors(state) => {
+ match this.schedule_next_pthread_tls_dtor(state)? {
+ Poll::Pending => {} // just keep going
+ Poll::Ready(()) => self.0 = Done,
+ }
+ }
+ Done => {
+ this.machine.tls.delete_all_thread_tls(this.get_active_thread());
+ return Ok(Poll::Ready(()));
+ }
+ }
+
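+ // Either a frame was pushed above or the state just advanced; report
+ // `Pending` so that we get called again once the stack is empty.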
+ Ok(Poll::Pending)
+ }
+}
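+
+// A sketch of the intended caller side (the loop shown here is hypothetical;
+// only `on_stack_empty` is real): whenever the active thread's stack becomes
+// empty, the scheduler does
+//     if state.on_stack_empty(this)?.is_ready() {
+//         /* all dtors ran and TLS is deleted; the thread may terminate */
+//     }
+// and keeps the thread alive until `Poll::Ready(())` is returned.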
+
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Schedule TLS destructors for Windows.
/// On Windows, TLS destructors are managed by std.
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- let active_thread = this.get_active_thread();
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
return Ok(());
}
let thread_callback =
- this.eval_windows("thread_local_key", "p_thread_callback")?.to_pointer(this)?;
+ this.eval_windows("thread_local_key", "p_thread_callback").to_pointer(this)?;
let thread_callback = this.get_ptr_fn(thread_callback)?.as_instance()?;
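+ // `p_thread_callback` is the static that std puts into the `.CRT$XLB`
+ // linker section on Windows; calling it directly stands in for the loader
+ // invoking the registered TLS callbacks.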
// FIXME: Technically, the reason should be `DLL_PROCESS_DETACH` when the main thread exits
// but std treats both the same.
- let reason = this.eval_windows("c", "DLL_THREAD_DETACH")?;
+ let reason = this.eval_windows("c", "DLL_THREAD_DETACH");
// The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
// FIXME: `h` should be a handle to the current module and what `pv` should be is unknown
None,
StackPopCleanup::Root { cleanup: true },
)?;
-
- this.enable_thread(active_thread);
Ok(())
}
/// Schedule the macOS thread destructor of the thread-local storage to be
- /// executed. Returns `true` if scheduled.
- ///
- /// Note: It is safe to call this function also on other Unixes.
- fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
+ /// executed.
+ fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread_id = this.get_active_thread();
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
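+ // `remove` consumes the map entry, so this dtor can only ever be
+ // scheduled once per thread.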
None,
StackPopCleanup::Root { cleanup: true },
)?;
-
- // Enable the thread so that it steps through the destructor which
- // we just scheduled. Since we deleted the destructor, it is
- // guaranteed that we will schedule it again. The `dtors_running`
- // flag will prevent the code from adding the destructor again.
- this.enable_thread(thread_id);
- Ok(true)
- } else {
- Ok(false)
}
+ Ok(())
}
- /// Schedule a pthread TLS destructor. Returns `true` if found
- /// a destructor to schedule, and `false` otherwise.
+ /// Schedule a pthread TLS destructor. Returns `Poll::Pending` if a dtor
+ /// was scheduled (it still has to actually run), and `Poll::Ready(())` if
+ /// no dtors remain for this thread.
- fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
+ fn schedule_next_pthread_tls_dtor(
+ &mut self,
+ state: &mut RunningDtorState,
+ ) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
- assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
// Fetch next dtor after `key`.
- let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key;
- let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
+ let dtor = match this.machine.tls.fetch_tls_dtor(state.last_key, active_thread) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => this.machine.tls.fetch_tls_dtor(None, active_thread),
};
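+ // Wrapping around to the start matches pthread semantics: a dtor may store
+ // a fresh value into a TLS slot, and such slots must be destructed again
+ // (POSIX caps this at PTHREAD_DESTRUCTOR_ITERATIONS rounds; no cap is
+ // enforced here).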
if let Some((instance, ptr, key)) = dtor {
- this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
- Some(key);
+ state.last_key = Some(key);
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
assert!(
- !ptr.to_machine_usize(this).unwrap() != 0,
+ ptr.to_machine_usize(this).unwrap() != 0,
StackPopCleanup::Root { cleanup: true },
)?;
- this.enable_thread(active_thread);
- return Ok(true);
- }
- this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
-
- Ok(false)
- }
-}
-
-impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
-pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
- /// Schedule an active thread's TLS destructor to run on the active thread.
- /// Note that this function does not run the destructors itself, it just
- /// schedules them one by one each time it is called and reenables the
- /// thread so that it can be executed normally by the main execution loop.
- ///
- /// Note: we consistently run TLS destructors for all threads, including the
- /// main thread. However, it is not clear that we should run the TLS
- /// destructors for the main thread. See issue:
- /// <https://github.com/rust-lang/rust/issues/28129>.
- fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
- let this = self.eval_context_mut();
- let active_thread = this.get_active_thread();
- trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);
-
- if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
- // This is the first time we got asked to schedule a destructor. The
- // Windows schedule destructor function must be called exactly once,
- // this is why it is in this block.
- if this.tcx.sess.target.os == "windows" {
- // On Windows, we signal that the thread quit by starting the
- // relevant function, reenabling the thread, and going back to
- // the scheduler.
- this.schedule_windows_tls_dtors()?;
- return Ok(());
- }
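+ // A dtor call was pushed; report `Pending` so that we come back here after
+ // it finishes and can fetch the next one.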
+ return Ok(Poll::Pending);
}
- // The remaining dtors make some progress each time around the scheduler loop,
- // until they return `false` to indicate that they are done.
-
- // The macOS thread wide destructor runs "before any TLS slots get
- // freed", so do that first.
- if this.schedule_macos_tls_dtor()? {
- // We have scheduled a MacOS dtor to run on the thread. Execute it
- // to completion and come back here. Scheduling a destructor
- // destroys it, so we will not enter this branch again.
- return Ok(());
- }
- if this.schedule_next_pthread_tls_dtor()? {
- // We have scheduled a pthread destructor and removed it from the
- // destructors list. Run it to completion and come back here.
- return Ok(());
- }
-
- // All dtors done!
- this.machine.tls.delete_all_thread_tls(active_thread);
- this.thread_terminated()?;
- Ok(())
+ Ok(Poll::Ready(()))
}
}