Auto merge of #2427 - Nilstrieb:doc-fix, r=saethlin

diff --git a/src/thread.rs b/src/thread.rs
index e61761e599cd85340d4b1ad40db7f45f9ccc1669..683694f482eaab896b441574de2674d727842271 100644
--- a/src/thread.rs
+++ b/src/thread.rs
@@ -2,7 +2,6 @@
 
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
-use std::convert::TryFrom;
 use std::num::TryFromIntError;
 use std::time::{Duration, Instant, SystemTime};
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_index::vec::{Idx, IndexVec};
-use rustc_middle::{
-    middle::codegen_fn_attrs::CodegenFnAttrFlags,
-    mir,
-    ty::{self, Instance},
-};
+use rustc_middle::mir::Mutability;
 
+use crate::concurrency::data_race;
 use crate::sync::SynchronizationState;
 use crate::*;
 
@@ -63,7 +59,7 @@ fn index(self) -> usize {
 impl TryFrom<u64> for ThreadId {
     type Error = TryFromIntError;
     fn try_from(id: u64) -> Result<Self, Self::Error> {
-        u32::try_from(id).map(|id_u32| Self(id_u32))
+        u32::try_from(id).map(Self)
     }
 }
 
@@ -74,8 +70,8 @@ fn from(id: u32) -> Self {
 }
 
 impl ThreadId {
-    pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
-        Scalar::from_u32(u32::try_from(self.0).unwrap())
+    pub fn to_u32_scalar(&self) -> Scalar<Provenance> {
+        Scalar::from_u32(self.0)
     }
 }
 
@@ -91,8 +87,8 @@ pub enum ThreadState {
     /// responsibility of the synchronization primitives to track threads that
     /// are blocked by them.
     BlockedOnSync,
-    /// The thread has terminated its execution (we do not delete terminated
-    /// threads).
+    /// The thread has terminated its execution. We do not delete terminated
+    /// threads (FIXME: why?).
     Terminated,
 }
 
@@ -111,12 +107,23 @@ enum ThreadJoinStatus {
 /// A thread.
 pub struct Thread<'mir, 'tcx> {
     state: ThreadState,
+
     /// Name of the thread.
     thread_name: Option<Vec<u8>>,
+
     /// The virtual call stack.
-    stack: Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>>,
+    stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
+
     /// The join status.
     join_status: ThreadJoinStatus,
+
+    /// The temporary used for storing the argument of
+    /// the call to `miri_start_panic` (the panic payload) when unwinding.
+    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
+    pub(crate) panic_payload: Option<Scalar<Provenance>>,
+
+    /// Last OS error location in memory. It is a 32-bit integer.
+    pub(crate) last_error: Option<MPlaceTy<'tcx, Provenance>>,
 }
 
 impl<'mir, 'tcx> Thread<'mir, 'tcx> {
@@ -134,17 +141,19 @@ fn check_terminated(&mut self) -> bool {
 
     /// Get the name of the current thread, or `<unnamed>` if it was not set.
     fn thread_name(&self) -> &[u8] {
-        if let Some(ref thread_name) = self.thread_name {
-            thread_name
-        } else {
-            b"<unnamed>"
-        }
+        if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
     }
 }
 
 impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}({:?}, {:?})", String::from_utf8_lossy(self.thread_name()), self.state, self.join_status)
+        write!(
+            f,
+            "{}({:?}, {:?})",
+            String::from_utf8_lossy(self.thread_name()),
+            self.state,
+            self.join_status
+        )
     }
 }
 
@@ -155,10 +164,21 @@ fn default() -> Self {
             thread_name: None,
             stack: Vec::new(),
             join_status: ThreadJoinStatus::Joinable,
+            panic_payload: None,
+            last_error: None,
         }
     }
 }
 
+impl<'mir, 'tcx> Thread<'mir, 'tcx> {
+    fn new(name: &str) -> Self {
+        let mut thread = Thread::default();
+        thread.thread_name = Some(Vec::from(name.as_bytes()));
+        thread
+    }
+}
+
+/// A specific moment in time.
 #[derive(Debug)]
 pub enum Time {
     Monotonic(Instant),
@@ -207,7 +227,7 @@ pub struct ThreadManager<'mir, 'tcx> {
     pub(crate) sync: SynchronizationState,
     /// A mapping from a thread-local static to an allocation id of a thread
     /// specific allocation.
-    thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), AllocId>>,
+    thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), Pointer<Provenance>>>,
     /// A flag that indicates that we should change the active thread.
     yield_active_thread: bool,
     /// Callbacks that are called once the specified time passes.
@@ -218,13 +238,13 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
     fn default() -> Self {
         let mut threads = IndexVec::new();
         // Create the main thread and add it to the list of threads.
-        let mut main_thread = Thread::default();
+        let mut main_thread = Thread::new("main");
         // The main thread can *not* be joined on.
         main_thread.join_status = ThreadJoinStatus::Detached;
         threads.push(main_thread);
         Self {
             active_thread: ThreadId::new(0),
-            threads: threads,
+            threads,
             sync: SynchronizationState::default(),
             thread_local_alloc_ids: Default::default(),
             yield_active_thread: false,
@@ -236,31 +256,39 @@ fn default() -> Self {
 impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     /// Check if we have an allocation for the given thread local static for the
     /// active thread.
-    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<AllocId> {
+    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<Pointer<Provenance>> {
         self.thread_local_alloc_ids.borrow().get(&(def_id, self.active_thread)).cloned()
     }
 
-    /// Set the allocation id as the allocation id of the given thread local
+    /// Set the pointer for the allocation of the given thread local
     /// static for the active thread.
     ///
     /// Panics if a thread local is initialized twice for the same thread.
-    fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) {
+    fn set_thread_local_alloc(&self, def_id: DefId, ptr: Pointer<Provenance>) {
         self.thread_local_alloc_ids
             .borrow_mut()
-            .insert((def_id, self.active_thread), new_alloc_id)
-            .unwrap_none();
+            .try_insert((def_id, self.active_thread), ptr)
+            .unwrap();
     }
 
     /// Borrow the stack of the active thread.
-    fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
+    pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] {
         &self.threads[self.active_thread].stack
     }
 
     /// Mutably borrow the stack of the active thread.
-    fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
+    fn active_thread_stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
         &mut self.threads[self.active_thread].stack
     }
 
+    pub fn all_stacks(
+        &self,
+    ) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>]> {
+        self.threads.iter().map(|t| &t.stack[..])
+    }
+
     /// Create a new thread and return its id.
     fn create_thread(&mut self) -> ThreadId {
         let new_thread_id = ThreadId::new(self.threads.len());
@@ -277,20 +305,31 @@ fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId {
     }
 
     /// Get the id of the currently active thread.
-    fn get_active_thread_id(&self) -> ThreadId {
+    pub fn get_active_thread_id(&self) -> ThreadId {
         self.active_thread
     }
 
     /// Get the total number of threads that were ever spawned by this program.
-    fn get_total_thread_count(&self) -> usize {
+    pub fn get_total_thread_count(&self) -> usize {
         self.threads.len()
     }
 
+    /// Get the number of threads that are currently live, i.e., not yet terminated.
+    /// (They might be blocked.)
+    pub fn get_live_thread_count(&self) -> usize {
+        self.threads.iter().filter(|t| !matches!(t.state, ThreadState::Terminated)).count()
+    }
+
     /// Has the given thread terminated?
     fn has_terminated(&self, thread_id: ThreadId) -> bool {
         self.threads[thread_id].state == ThreadState::Terminated
     }
 
+    /// Have all threads terminated?
+    fn have_all_terminated(&self) -> bool {
+        self.threads.iter().all(|thread| thread.state == ThreadState::Terminated)
+    }
+
     /// Enable the thread for execution. The thread must be terminated.
     fn enable_thread(&mut self, thread_id: ThreadId) {
         assert!(self.has_terminated(thread_id));
@@ -318,7 +357,11 @@ fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> {
     }
 
     /// Mark that the active thread tries to join the thread with `joined_thread_id`.
-    fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn join_thread(
+        &mut self,
+        joined_thread_id: ThreadId,
+        data_race: Option<&mut data_race::GlobalState>,
+    ) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
             throw_ub_format!("trying to join a detached or already joined thread");
         }
@@ -342,20 +385,30 @@ fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
                 self.active_thread,
                 joined_thread_id
             );
+        } else {
+            // The thread has already terminated - mark join happens-before
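+            // (If the joined thread is still running, the happens-before edge is
+            // instead established in `thread_terminated` when it terminates.)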
+            if let Some(data_race) = data_race {
+                data_race.thread_joined(self, self.active_thread, joined_thread_id);
+            }
         }
         Ok(())
     }
 
     /// Set the name of the active thread.
-    fn set_thread_name(&mut self, new_thread_name: Vec<u8>) {
+    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         self.active_thread_mut().thread_name = Some(new_thread_name);
     }
 
     /// Get the name of the active thread.
-    fn get_thread_name(&self) -> &[u8] {
+    pub fn get_active_thread_name(&self) -> &[u8] {
         self.active_thread_ref().thread_name()
     }
 
+    /// Get the name of the given thread.
+    pub fn get_thread_name(&self, thread: ThreadId) -> &[u8] {
+        self.threads[thread].thread_name()
+    }
+
     /// Put the thread into the blocked state.
     fn block_thread(&mut self, thread: ThreadId) {
         let state = &mut self.threads[thread].state;
@@ -372,10 +425,16 @@ fn unblock_thread(&mut self, thread: ThreadId) {
 
     /// Change the active thread to some enabled thread.
     fn yield_active_thread(&mut self) {
+        // We do not yield immediately, as swapping out the current stack while executing a MIR statement
+        // could lead to all sorts of confusion.
+        // We should only switch stacks between steps.
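+        // (The `yield_active_thread` flag set here is consumed by `schedule`, which
+        // only runs between steps.)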
         self.yield_active_thread = true;
     }
 
     /// Register the given `callback` to be called once the `call_time` passes.
+    ///
+    /// The callback will be called with `thread` being the active thread, and
+    /// the callback may not change the active thread.
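+    ///
+    /// (Timeout callbacks are used, e.g., to implement the timeout of
+    /// `pthread_cond_timedwait`; see the scheduler below.)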
     fn register_timeout_callback(
         &mut self,
         thread: ThreadId,
@@ -383,8 +442,8 @@ fn register_timeout_callback(
         callback: TimeoutCallback<'mir, 'tcx>,
     ) {
         self.timeout_callbacks
-            .insert(thread, TimeoutCallbackInfo { call_time, callback })
-            .unwrap_none();
+            .try_insert(thread, TimeoutCallbackInfo { call_time, callback })
+            .unwrap();
     }
 
     /// Unregister the callback for the `thread`.
@@ -408,6 +467,48 @@ fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx
         None
     }
 
+    /// Wakes up threads joining on the active one and deallocates thread-local statics.
+    /// The pointers to the statics that can now be freed are returned.
+    fn thread_terminated(
+        &mut self,
+        mut data_race: Option<&mut data_race::GlobalState>,
+    ) -> Vec<Pointer<Provenance>> {
+        let mut free_tls_statics = Vec::new();
+        {
+            let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
+            thread_local_statics.retain(|&(_def_id, thread), &mut alloc_id| {
+                if thread != self.active_thread {
+                    // Keep this static around.
+                    return true;
+                }
+                // Delete this static from the map and from memory.
+                // We cannot free directly here as we cannot use `?` in this context.
+                free_tls_statics.push(alloc_id);
+                false
+            });
+        }
+        // Set the thread into a terminated state in the data-race detector.
+        if let Some(ref mut data_race) = data_race {
+            data_race.thread_terminated(self);
+        }
+        // Check if we need to unblock any threads.
+        let mut joined_threads = vec![]; // store which threads joined, we'll need it
+        for (i, thread) in self.threads.iter_enumerated_mut() {
+            if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
+                // The thread has terminated, mark happens-before edge to joining thread
+                if data_race.is_some() {
+                    joined_threads.push(i);
+                }
+                trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
+                thread.state = ThreadState::Enabled;
+            }
+        }
+        for &i in &joined_threads {
+            data_race.as_mut().unwrap().thread_joined(self, i, self.active_thread);
+        }
+        free_tls_statics
+    }
+
     /// Decide which action to take next and on which thread.
     ///
     /// The currently implemented scheduling policy is the one that is commonly
@@ -419,41 +520,50 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         // checks whether the thread has popped all its stack and if yes, sets
         // the thread state to terminated).
         if self.threads[self.active_thread].check_terminated() {
-            // Check if we need to unblock any threads.
-            for (i, thread) in self.threads.iter_enumerated_mut() {
-                if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
-                    trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
-                    thread.state = ThreadState::Enabled;
-                }
-            }
             return Ok(SchedulingAction::ExecuteDtors);
         }
+        // If we get here again and the thread is *still* terminated, there are no more dtors to run.
         if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
             // The main thread terminated; stop the program.
-            if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) {
-                // FIXME: This check should be either configurable or just emit
-                // a warning. For example, it seems normal for a program to
-                // terminate without waiting for its detached threads to
-                // terminate. However, this case is not trivial to support
-                // because we also probably do not want to consider the memory
-                // owned by these threads as leaked.
-                throw_unsup_format!("the main thread terminated without waiting for other threads");
-            }
+            // We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior.
             return Ok(SchedulingAction::Stop);
         }
+        // This thread and the program can keep going.
         if self.threads[self.active_thread].state == ThreadState::Enabled
             && !self.yield_active_thread
         {
             // The currently active thread is still enabled, just continue with it.
             return Ok(SchedulingAction::ExecuteStep);
         }
-        // We need to pick a new thread for execution.
-        for (id, thread) in self.threads.iter_enumerated() {
+        // The active thread blocked or yielded. Let's see if there are any timeouts to take care of. We do
+        // this *before* running any other thread, to ensure that timeouts "in the past" fire before
+        // any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
+        // error is returned if [...] the absolute time specified by abstime has already been passed
+        // at the time of the call".
+        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
+        let potential_sleep_time =
+            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
+        if potential_sleep_time == Some(Duration::new(0, 0)) {
+            return Ok(SchedulingAction::ExecuteTimeoutCallback);
+        }
+        // No callback is ready to run right now, so we go looking for another enabled thread to execute.
+        // Crucially, we start searching at the current active thread ID, rather than at 0, since we
+        // want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2.
+        //
+        // `skip(N)` means we start iterating at thread N, so we skip 1 more to start just *after*
+        // the active thread. Then after that we look at `take(N)`, i.e., the threads *before* the
+        // active thread.
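+        // For example, with four threads and thread 1 as the active thread, this
+        // visits threads 2, 3, and then 0.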
+        let threads = self
+            .threads
+            .iter_enumerated()
+            .skip(self.active_thread.index() + 1)
+            .chain(self.threads.iter_enumerated().take(self.active_thread.index()));
+        for (id, thread) in threads {
+            debug_assert_ne!(self.active_thread, id);
             if thread.state == ThreadState::Enabled {
-                if !self.yield_active_thread || id != self.active_thread {
-                    self.active_thread = id;
-                    break;
-                }
+                self.active_thread = id;
+                break;
             }
         }
         self.yield_active_thread = false;
@@ -463,9 +573,7 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         // We have not found a thread to execute.
         if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) {
             unreachable!("all threads terminated without the main thread terminating?!");
-        } else if let Some(sleep_time) =
-            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min()
-        {
+        } else if let Some(sleep_time) = potential_sleep_time {
             // All threads are currently blocked, but we have unexecuted
             // timeout_callbacks, which may unblock some of the threads. Hence,
             // sleep until the first callback.
@@ -480,94 +588,44 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
 // Public interface to thread management.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
-    /// A workaround for thread-local statics until
-    /// https://github.com/rust-lang/rust/issues/70685 is fixed: change the
-    /// thread-local allocation id with a freshly generated allocation id for
-    /// the currently active thread.
-    fn remap_thread_local_alloc_ids(
-        &self,
-        val: &mut mir::interpret::ConstValue<'tcx>,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        match *val {
-            mir::interpret::ConstValue::Scalar(Scalar::Ptr(ref mut ptr)) => {
-                let alloc_id = ptr.alloc_id;
-                let alloc = this.tcx.get_global_alloc(alloc_id);
-                let tcx = this.tcx;
-                let is_thread_local = |def_id| {
-                    tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
-                };
-                match alloc {
-                    Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => {
-                        ptr.alloc_id = this.get_or_create_thread_local_alloc_id(def_id)?;
-                    }
-                    _ => {}
-                }
-            }
-            _ => {
-                // FIXME: Handling only `Scalar` seems to work for now, but at
-                // least in principle thread-locals could be in any constant, so
-                // we should also consider other cases. However, once
-                // https://github.com/rust-lang/rust/issues/70685 gets fixed,
-                // this code will have to be rewritten anyway.
-            }
-        }
-        Ok(())
-    }
-
     /// Get a thread-specific allocation id for the given thread-local static.
     /// If needed, allocate a new one.
-    ///
-    /// FIXME: This method should be replaced as soon as
-    /// https://github.com/rust-lang/rust/issues/70685 gets fixed.
-    fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
-        let this = self.eval_context_ref();
+    fn get_or_create_thread_local_alloc(
+        &mut self,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Provenance>> {
+        let this = self.eval_context_mut();
         let tcx = this.tcx;
-        if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) {
+        if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
             // We already have a thread-specific allocation id for this
             // thread-local static.
-            Ok(new_alloc_id)
+            Ok(old_alloc)
         } else {
             // We need to allocate a thread-specific allocation id for this
             // thread-local static.
-            //
-            // At first, we invoke the `const_eval_raw` query and extract the
-            // allocation from it. Unfortunately, we have to duplicate the code
-            // from `Memory::get_global_alloc` that does this.
-            //
-            // Then we store the retrieved allocation back into the `alloc_map`
-            // to get a fresh allocation id, which we can use as a
-            // thread-specific allocation id for the thread-local static.
+            // First, we compute the initial value for this static.
             if tcx.is_foreign_item(def_id) {
                 throw_unsup_format!("foreign thread-local statics are not supported");
             }
-            // Invoke the `const_eval_raw` query.
-            let instance = Instance::mono(tcx.tcx, def_id);
-            let gid = GlobalId { instance, promoted: None };
-            let raw_const =
-                tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
-                    // no need to report anything, the const_eval call takes care of that
-                    // for statics
-                    assert!(tcx.is_static(def_id));
-                    err
-                })?;
-            let id = raw_const.alloc_id;
-            // Extract the allocation from the query result.
-            let allocation = tcx.global_alloc(id).unwrap_memory();
-            // Create a new allocation id for the same allocation in this hacky
-            // way. Internally, `alloc_map` deduplicates allocations, but this
-            // is fine because Miri will make a copy before a first mutable
-            // access.
-            let new_alloc_id = tcx.create_memory_alloc(allocation);
-            this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id);
-            Ok(new_alloc_id)
+            let allocation = tcx.eval_static_initializer(def_id)?;
+            let mut allocation = allocation.inner().clone();
+            // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
+            allocation.mutability = Mutability::Mut;
+            // Create a fresh allocation with this content.
+            let new_alloc = this.allocate_raw_ptr(allocation, MiriMemoryKind::Tls.into())?;
+            this.machine.threads.set_thread_local_alloc(def_id, new_alloc);
+            Ok(new_alloc)
         }
     }
 
     #[inline]
-    fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> {
+    fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.create_thread())
+        let id = this.machine.threads.create_thread();
+        if let Some(data_race) = &mut this.machine.data_race {
+            data_race.thread_created(&this.machine.threads, id);
+        }
+        id
     }
 
     #[inline]
@@ -579,84 +637,113 @@ fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.machine.threads.join_thread(joined_thread_id)
+        this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?;
+        Ok(())
+    }
+
+    #[inline]
+    fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
+        let this = self.eval_context_mut();
+        this.machine.threads.set_active_thread_id(thread_id)
     }
 
     #[inline]
-    fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> {
+    fn get_active_thread(&self) -> ThreadId {
+        let this = self.eval_context_ref();
+        this.machine.threads.get_active_thread_id()
+    }
+
+    #[inline]
+    fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_active_thread_id(thread_id))
+        this.machine.threads.active_thread_mut()
     }
 
     #[inline]
-    fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> {
+    fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_active_thread_id())
+        this.machine.threads.active_thread_ref()
     }
 
     #[inline]
-    fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> {
+    fn get_total_thread_count(&self) -> usize {
+        let this = self.eval_context_ref();
+        this.machine.threads.get_total_thread_count()
+    }
+
+    #[inline]
+    fn has_terminated(&self, thread_id: ThreadId) -> bool {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_total_thread_count())
+        this.machine.threads.has_terminated(thread_id)
     }
 
     #[inline]
-    fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> {
+    fn have_all_terminated(&self) -> bool {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.has_terminated(thread_id))
+        this.machine.threads.have_all_terminated()
     }
 
     #[inline]
-    fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn enable_thread(&mut self, thread_id: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.enable_thread(thread_id);
-        Ok(())
     }
 
     #[inline]
-    fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
+    fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] {
         let this = self.eval_context_ref();
         this.machine.threads.active_thread_stack()
     }
 
     #[inline]
-    fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>> {
+    fn active_thread_stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
         let this = self.eval_context_mut();
         this.machine.threads.active_thread_stack_mut()
     }
 
     #[inline]
-    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) -> InterpResult<'tcx, ()> {
+    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_thread_name(new_thread_name))
+        this.machine.threads.set_active_thread_name(new_thread_name);
     }
 
     #[inline]
-    fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]>
+    fn get_active_thread_name<'c>(&'c self) -> &'c [u8]
     where
         'mir: 'c,
     {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_thread_name())
+        this.machine.threads.get_active_thread_name()
     }
 
     #[inline]
-    fn block_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn block_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.block_thread(thread))
+        this.machine.threads.block_thread(thread);
     }
 
     #[inline]
-    fn unblock_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unblock_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.unblock_thread(thread))
+        this.machine.threads.unblock_thread(thread);
     }
 
     #[inline]
-    fn yield_active_thread(&mut self) -> InterpResult<'tcx> {
+    fn yield_active_thread(&mut self) {
         let this = self.eval_context_mut();
         this.machine.threads.yield_active_thread();
-        Ok(())
+    }
+
+    #[inline]
+    fn maybe_preempt_active_thread(&mut self) {
+        use rand::Rng as _;
+
+        let this = self.eval_context_mut();
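+        // With probability `preemption_rate`, tell the scheduler to switch to
+        // another (enabled) thread after the current step.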
+        if this.machine.rng.get_mut().gen_bool(this.machine.preemption_rate) {
+            this.yield_active_thread();
+        }
     }
 
     #[inline]
@@ -665,17 +752,15 @@ fn register_timeout_callback(
         thread: ThreadId,
         call_time: Time,
         callback: TimeoutCallback<'mir, 'tcx>,
-    ) -> InterpResult<'tcx> {
+    ) {
         let this = self.eval_context_mut();
         this.machine.threads.register_timeout_callback(thread, call_time, callback);
-        Ok(())
     }
 
     #[inline]
-    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.unregister_timeout_callback_if_exists(thread);
-        Ok(())
     }
 
     /// Execute a timeout callback on the callback's thread.
@@ -683,10 +768,26 @@ fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) -> InterpR
     fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (thread, callback) =
-            this.machine.threads.get_ready_callback().expect("no callback found");
-        let old_thread = this.set_active_thread(thread)?;
+            if let Some((thread, callback)) = this.machine.threads.get_ready_callback() {
+                (thread, callback)
+            } else {
+                // get_ready_callback can return None if the computer's clock
+                // was shifted after calling the scheduler and before the call
+                // to get_ready_callback (see issue
+                // https://github.com/rust-lang/miri/issues/1763). In this case,
+                // just do nothing, which effectively just returns to the
+                // scheduler.
+                return Ok(());
+            };
+        // This back-and-forth with `set_active_thread` is here because of two
+        // design decisions:
+        // 1. Make the caller, and not the callback, responsible for changing the
+        //    active thread.
+        // 2. Make the scheduler the only place that can change the active
+        //    thread.
+        let old_thread = this.set_active_thread(thread);
         callback(this)?;
-        this.set_active_thread(old_thread)?;
+        this.set_active_thread(old_thread);
         Ok(())
     }
 
@@ -696,4 +797,17 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         let this = self.eval_context_mut();
         this.machine.threads.schedule()
     }
+
+    /// Handles thread termination of the active thread: wakes up threads joining on this one,
+    /// and deallocates thread-local statics.
+    ///
+    /// This is called from `tls.rs` after handling the TLS dtors.
+    #[inline]
+    fn thread_terminated(&mut self) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
+            this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
+        }
+        Ok(())
+    }
 }