git.lizzy.rs Git - rust.git/blobdiff - src/thread.rs
Add newlines at end of file + use replace.
[rust.git] / src / thread.rs
index 6ebf35a6527f565874e1649f8cd614948751b33d..08aeaa4fd095fc9bfb2ac050c49d9a723cb3de57 100644 (file)
@@ -3,19 +3,14 @@
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
 use std::convert::TryFrom;
-use std::num::{NonZeroU32, TryFromIntError};
-use std::time::Instant;
+use std::num::TryFromIntError;
+use std::time::{Duration, Instant, SystemTime};
 
 use log::trace;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_index::vec::{Idx, IndexVec};
-use rustc_middle::{
-    middle::codegen_fn_attrs::CodegenFnAttrFlags,
-    mir,
-    ty::{self, Instance},
-};
 
 use crate::sync::SynchronizationState;
 use crate::*;
 pub enum SchedulingAction {
     /// Execute step on the active thread.
     ExecuteStep,
-    /// Execute a scheduler's callback.
-    ExecuteCallback,
+    /// Execute a timeout callback.
+    ExecuteTimeoutCallback,
     /// Execute destructors of the active thread.
     ExecuteDtors,
     /// Stop the program.
     Stop,
 }
 
-type EventCallback<'mir, 'tcx> =
+/// Timeout callbacks can be created by synchronization primitives to tell the
+/// scheduler to invoke the callback once the specified period of time has passed.
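+/// The callback runs on the thread it was registered for; the scheduler makes
+/// that thread active before invoking it.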
+type TimeoutCallback<'mir, 'tcx> =
     Box<dyn FnOnce(&mut InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>) -> InterpResult<'tcx> + 'tcx>;
 
 /// A thread identifier.
@@ -77,21 +74,6 @@ pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
     }
 }
 
-/// An identifier of a set of blocked threads. 0 is used to indicate the absence
-/// of a blockset identifier and, therefore, is not a valid identifier.
-#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
-pub struct BlockSetId(NonZeroU32);
-
-impl BlockSetId {
-    /// Panics if `id` is 0.
-    pub fn new(id: u32) -> Self {
-        Self(NonZeroU32::new(id).expect("0 is not a valid blockset id"))
-    }
-    pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
-        Scalar::from_u32(self.0.get())
-    }
-}
-
 /// The state of a thread.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum ThreadState {
@@ -100,11 +82,12 @@ pub enum ThreadState {
     /// The thread tried to join the specified thread and is blocked until that
     /// thread terminates.
     BlockedOnJoin(ThreadId),
-    /// The thread is blocked and belongs to the given blockset.
-    Blocked(BlockSetId),
-    BlockedThread,
-    /// The thread has terminated its execution (we do not delete terminated
-    /// threads).
+    /// The thread is blocked on some synchronization primitive. It is the
+    /// responsibility of the synchronization primitives to track threads that
+    /// are blocked by them.
+    BlockedOnSync,
+    /// The thread has terminated its execution. We do not delete terminated
+    /// threads (FIXME: why?).
     Terminated,
 }
 
@@ -123,12 +106,23 @@ enum ThreadJoinStatus {
 /// A thread.
 pub struct Thread<'mir, 'tcx> {
     state: ThreadState,
+
     /// Name of the thread.
     thread_name: Option<Vec<u8>>,
+
     /// The virtual call stack.
     stack: Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>>,
+
     /// The join status.
     join_status: ThreadJoinStatus,
+
+    /// The temporary used for storing the argument of
+    /// the call to `miri_start_panic` (the panic payload) when unwinding.
+    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
+    pub(crate) panic_payload: Option<Scalar<Tag>>,
+
+    /// Last OS error location in memory. It is a 32-bit integer.
+    pub(crate) last_error: Option<MPlaceTy<'tcx, Tag>>,
 }
 
 impl<'mir, 'tcx> Thread<'mir, 'tcx> {
@@ -167,6 +161,26 @@ fn default() -> Self {
             thread_name: None,
             stack: Vec::new(),
             join_status: ThreadJoinStatus::Joinable,
+            panic_payload: None,
+            last_error: None,
+        }
+    }
+}
+
+/// A specific moment in time.
+#[derive(Debug)]
+pub enum Time {
+    Monotonic(Instant),
+    RealTime(SystemTime),
+}
+
+impl Time {
+    /// How long do we have to wait from now until the specified time?
+    fn get_wait_time(&self) -> Duration {
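+        // In both cases, if the target time is already in the past, the wait time is zero.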
+        match self {
+            Time::Monotonic(instant) => instant.saturating_duration_since(Instant::now()),
+            Time::RealTime(time) =>
+                time.duration_since(SystemTime::now()).unwrap_or(Duration::new(0, 0)),
         }
     }
 }
@@ -175,16 +189,16 @@ fn default() -> Self {
 /// conditional variable with a timeout creates a callback that is called after
 /// the specified time and unblocks the thread. If another thread signals on the
 /// conditional variable, the signal handler deletes the callback.
-struct CallBackInfo<'mir, 'tcx> {
+struct TimeoutCallbackInfo<'mir, 'tcx> {
     /// The callback should be called no earlier than this time.
-    call_time: Instant,
+    call_time: Time,
     /// The called function.
-    callback: EventCallback<'mir, 'tcx>,
+    callback: TimeoutCallback<'mir, 'tcx>,
 }
 
-impl<'mir, 'tcx> std::fmt::Debug for CallBackInfo<'mir, 'tcx> {
+impl<'mir, 'tcx> std::fmt::Debug for TimeoutCallbackInfo<'mir, 'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "CallBack({:?})", self.call_time)
+        write!(f, "TimeoutCallback({:?})", self.call_time)
     }
 }
 
@@ -197,17 +211,16 @@ pub struct ThreadManager<'mir, 'tcx> {
     ///
     /// Note that this vector also contains terminated threads.
     threads: IndexVec<ThreadId, Thread<'mir, 'tcx>>,
-    /// FIXME: make private.
+    /// This field is pub(crate) because the synchronization primitives
+    /// (`crate::sync`) need a way to access it.
     pub(crate) sync: SynchronizationState,
-    /// A counter used to generate unique identifiers for blocksets.
-    blockset_counter: u32,
     /// A mapping from a thread-local static to an allocation id of a thread
     /// specific allocation.
     thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), AllocId>>,
     /// A flag that indicates that we should change the active thread.
     yield_active_thread: bool,
     /// Callbacks that are called once the specified time passes.
-    callbacks: FxHashMap<ThreadId, CallBackInfo<'mir, 'tcx>>,
+    timeout_callbacks: FxHashMap<ThreadId, TimeoutCallbackInfo<'mir, 'tcx>>,
 }
 
 impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
@@ -222,10 +235,9 @@ fn default() -> Self {
             active_thread: ThreadId::new(0),
             threads: threads,
             sync: SynchronizationState::default(),
-            blockset_counter: 0,
             thread_local_alloc_ids: Default::default(),
             yield_active_thread: false,
-            callbacks: FxHashMap::default(),
+            timeout_callbacks: FxHashMap::default(),
         }
     }
 }
@@ -315,7 +327,7 @@ fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> {
     }
 
     /// Mark that the active thread tries to join the thread with `joined_thread_id`.
-    fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &data_race::GlobalState) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
             throw_ub_format!("trying to join a detached or already joined thread");
         }
@@ -339,6 +351,9 @@ fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
                 self.active_thread,
                 joined_thread_id
             );
+        } else {
+            // The thread has already terminated; mark the join happens-before edge.
+            data_race.thread_joined(self.active_thread, joined_thread_id);
         }
         Ok(())
     }
@@ -357,46 +372,52 @@ fn get_thread_name(&self) -> &[u8] {
     fn block_thread(&mut self, thread: ThreadId) {
         let state = &mut self.threads[thread].state;
         assert_eq!(*state, ThreadState::Enabled);
-        *state = ThreadState::BlockedThread;
+        *state = ThreadState::BlockedOnSync;
     }
 
     /// Put the blocked thread into the enabled state.
     fn unblock_thread(&mut self, thread: ThreadId) {
         let state = &mut self.threads[thread].state;
-        assert_eq!(*state, ThreadState::BlockedThread);
+        assert_eq!(*state, ThreadState::BlockedOnSync);
         *state = ThreadState::Enabled;
     }
 
     /// Change the active thread to some enabled thread.
     fn yield_active_thread(&mut self) {
+        // We do not yield immediately, as swapping out the current stack while executing a MIR statement
+        // could lead to all sorts of confusion.
+        // We should only switch stacks between steps.
         self.yield_active_thread = true;
     }
 
     /// Register the given `callback` to be called once the `call_time` passes.
-    fn register_callback(
+    ///
+    /// The callback will be called with `thread` being the active thread, and
+    /// the callback must not change the active thread.
+    fn register_timeout_callback(
         &mut self,
         thread: ThreadId,
-        call_time: Instant,
-        callback: EventCallback<'mir, 'tcx>,
+        call_time: Time,
+        callback: TimeoutCallback<'mir, 'tcx>,
     ) {
-        self.callbacks
-            .insert(thread, CallBackInfo { call_time: call_time, callback: callback })
+        self.timeout_callbacks
+            .insert(thread, TimeoutCallbackInfo { call_time, callback })
             .unwrap_none();
     }
 
     /// Unregister the callback for the `thread`.
-    fn unregister_callback_if_exists(&mut self, thread: ThreadId) {
-        self.callbacks.remove(&thread);
+    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
+        self.timeout_callbacks.remove(&thread);
     }
 
     /// Get a callback that is ready to be called.
-    fn get_callback(&mut self) -> Option<(ThreadId, EventCallback<'mir, 'tcx>)> {
-        let current_time = Instant::now();
-        // We use a for loop here to make the scheduler more deterministic.
+    fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx>)> {
+        // We iterate over all threads in the order of their indices because
+        // this allows us to have a deterministic scheduler.
         for thread in self.threads.indices() {
-            match self.callbacks.entry(thread) {
+            match self.timeout_callbacks.entry(thread) {
                 Entry::Occupied(entry) =>
-                    if current_time >= entry.get().call_time {
+                    if entry.get().call_time.get_wait_time() == Duration::new(0, 0) {
                         return Some((thread, entry.remove().callback));
                     },
                 Entry::Vacant(_) => {}
@@ -405,24 +426,46 @@ fn get_callback(&mut self) -> Option<(ThreadId, EventCallback<'mir, 'tcx>)> {
         None
     }
 
+    /// Wakes up threads joining on the active one and removes the active thread's
+    /// thread-local statics from the map. The `AllocId`s that can now be freed are returned.
+    fn thread_terminated(&mut self, data_race: &data_race::GlobalState) -> Vec<AllocId> {
+        let mut free_tls_statics = Vec::new();
+        {
+            let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
+            thread_local_statics.retain(|&(_def_id, thread), &mut alloc_id| {
+                if thread != self.active_thread {
+                    // Keep this static around.
+                    return true;
+                }
+                // Delete this static from the map and from memory.
+                // We cannot free directly here as we cannot use `?` in this context.
+                free_tls_statics.push(alloc_id);
+                return false;
+            });
+        }
+        // Check if we need to unblock any threads.
+        for (i, thread) in self.threads.iter_enumerated_mut() {
+            if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
+                // The thread has terminated; mark a happens-before edge to the joining thread.
+                data_race.thread_joined(i, self.active_thread);
+                trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
+                thread.state = ThreadState::Enabled;
+            }
+        }
+        return free_tls_statics;
+    }
+
     /// Decide which action to take next and on which thread.
     ///
     /// The currently implemented scheduling policy is the one that is commonly
     /// used in stateless model checkers such as Loom: run the active thread as
     /// long as we can and switch only when we have to (the active thread was
     /// blocked, terminated, or has explicitly asked to be preempted).
-    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
+    fn schedule(&mut self, data_race: &data_race::GlobalState) -> InterpResult<'tcx, SchedulingAction> {
         // Check whether the thread has **just** terminated (`check_terminated`
         // checks whether the thread has popped all its stack and if yes, sets
         // the thread state to terminated).
         if self.threads[self.active_thread].check_terminated() {
-            // Check if we need to unblock any threads.
-            for (i, thread) in self.threads.iter_enumerated_mut() {
-                if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
-                    trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
-                    thread.state = ThreadState::Enabled;
-                }
-            }
             return Ok(SchedulingAction::ExecuteDtors);
         }
         if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
@@ -438,6 +481,20 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
             }
             return Ok(SchedulingAction::Stop);
         }
+        // At least for `pthread_cond_timedwait` we need to report a timeout when
+        // the function is called after the specified time has already passed, even if a
+        // signal is received before the thread gets scheduled. Therefore, we
+        // need to schedule all timeout callbacks before we continue regular
+        // execution.
+        //
+        // Documentation:
+        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html#
+        let potential_sleep_time =
+            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
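+        // A wait time of zero means at least one callback is already due.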
+        if potential_sleep_time == Some(Duration::new(0, 0)) {
+            return Ok(SchedulingAction::ExecuteTimeoutCallback);
+        }
+        // No callbacks scheduled, pick a regular thread to execute.
         if self.threads[self.active_thread].state == ThreadState::Enabled
             && !self.yield_active_thread
         {
@@ -449,6 +506,7 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
             if thread.state == ThreadState::Enabled {
                 if !self.yield_active_thread || id != self.active_thread {
                     self.active_thread = id;
+                    data_race.thread_set_active(self.active_thread);
                     break;
                 }
             }
@@ -459,19 +517,13 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         }
         // We have not found a thread to execute.
         if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) {
-            unreachable!();
-        } else if let Some(next_call_time) =
-            self.callbacks.values().min_by_key(|info| info.call_time)
-        {
+            unreachable!("all threads terminated without the main thread terminating?!");
+        } else if let Some(sleep_time) = potential_sleep_time {
             // All threads are currently blocked, but we have unexecuted
-            // callbacks, which may unblock some of the threads. Hence,
+            // timeout_callbacks, which may unblock some of the threads. Hence,
             // sleep until the first callback.
-            if let Some(sleep_time) =
-                next_call_time.call_time.checked_duration_since(Instant::now())
-            {
-                std::thread::sleep(sleep_time);
-            }
-            Ok(SchedulingAction::ExecuteCallback)
+            std::thread::sleep(sleep_time);
+            Ok(SchedulingAction::ExecuteTimeoutCallback)
         } else {
             throw_machine_stop!(TerminationInfo::Deadlock);
         }
@@ -481,48 +533,10 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
 // Public interface to thread management.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
-    /// A workaround for thread-local statics until
-    /// https://github.com/rust-lang/rust/issues/70685 is fixed: change the
-    /// thread-local allocation id with a freshly generated allocation id for
-    /// the currently active thread.
-    fn remap_thread_local_alloc_ids(
-        &self,
-        val: &mut mir::interpret::ConstValue<'tcx>,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        match *val {
-            mir::interpret::ConstValue::Scalar(Scalar::Ptr(ref mut ptr)) => {
-                let alloc_id = ptr.alloc_id;
-                let alloc = this.tcx.get_global_alloc(alloc_id);
-                let tcx = this.tcx;
-                let is_thread_local = |def_id| {
-                    tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
-                };
-                match alloc {
-                    Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => {
-                        ptr.alloc_id = this.get_or_create_thread_local_alloc_id(def_id)?;
-                    }
-                    _ => {}
-                }
-            }
-            _ => {
-                // FIXME: Handling only `Scalar` seems to work for now, but at
-                // least in principle thread-locals could be in any constant, so
-                // we should also consider other cases. However, once
-                // https://github.com/rust-lang/rust/issues/70685 gets fixed,
-                // this code will have to be rewritten anyway.
-            }
-        }
-        Ok(())
-    }
-
     /// Get a thread-specific allocation id for the given thread-local static.
     /// If needed, allocate a new one.
-    ///
-    /// FIXME: This method should be replaced as soon as
-    /// https://github.com/rust-lang/rust/issues/70685 gets fixed.
-    fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
-        let this = self.eval_context_ref();
+    fn get_or_create_thread_local_alloc_id(&mut self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
+        let this = self.eval_context_mut();
         let tcx = this.tcx;
         if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) {
             // We already have a thread-specific allocation id for this
@@ -531,44 +545,24 @@ fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tc
         } else {
             // We need to allocate a thread-specific allocation id for this
             // thread-local static.
-            //
-            // At first, we invoke the `const_eval_raw` query and extract the
-            // allocation from it. Unfortunately, we have to duplicate the code
-            // from `Memory::get_global_alloc` that does this.
-            //
-            // Then we store the retrieved allocation back into the `alloc_map`
-            // to get a fresh allocation id, which we can use as a
-            // thread-specific allocation id for the thread-local static.
+            // First, we compute the initial value for this static.
             if tcx.is_foreign_item(def_id) {
                 throw_unsup_format!("foreign thread-local statics are not supported");
             }
-            // Invoke the `const_eval_raw` query.
-            let instance = Instance::mono(tcx.tcx, def_id);
-            let gid = GlobalId { instance, promoted: None };
-            let raw_const =
-                tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
-                    // no need to report anything, the const_eval call takes care of that
-                    // for statics
-                    assert!(tcx.is_static(def_id));
-                    err
-                })?;
-            let id = raw_const.alloc_id;
-            // Extract the allocation from the query result.
-            let allocation = tcx.global_alloc(id).unwrap_memory();
-            // Create a new allocation id for the same allocation in this hacky
-            // way. Internally, `alloc_map` deduplicates allocations, but this
-            // is fine because Miri will make a copy before a first mutable
-            // access.
-            let new_alloc_id = tcx.create_memory_alloc(allocation);
+            let allocation = tcx.eval_static_initializer(def_id)?;
+            // Create a fresh allocation with this content.
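+            // The initializer allocation is owned by tcx; cloning it gives each thread its own copy.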
+            let new_alloc_id = this.memory.allocate_with(allocation.clone(), MiriMemoryKind::Tls.into()).alloc_id;
             this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id);
             Ok(new_alloc_id)
         }
     }
 
     #[inline]
-    fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> {
+    fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.create_thread())
+        let id = this.machine.threads.create_thread();
+        this.memory.extra.data_race.thread_created(id);
+        id
     }
 
     #[inline]
@@ -580,38 +574,52 @@ fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.machine.threads.join_thread(joined_thread_id)
+        let data_race = &*this.memory.extra.data_race;
+        this.machine.threads.join_thread(joined_thread_id, data_race)?;
+        Ok(())
     }
 
     #[inline]
-    fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> {
+    fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_active_thread_id(thread_id))
+        this.memory.extra.data_race.thread_set_active(thread_id);
+        this.machine.threads.set_active_thread_id(thread_id)
     }
 
     #[inline]
-    fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> {
+    fn get_active_thread(&self) -> ThreadId {
+        let this = self.eval_context_ref();
+        this.machine.threads.get_active_thread_id()
+    }
+
+    #[inline]
+    fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
+        let this = self.eval_context_mut();
+        this.machine.threads.active_thread_mut()
+    }
+
+    #[inline]
+    fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_active_thread_id())
+        this.machine.threads.active_thread_ref()
     }
 
     #[inline]
-    fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> {
+    fn get_total_thread_count(&self) -> usize {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_total_thread_count())
+        this.machine.threads.get_total_thread_count()
     }
 
     #[inline]
-    fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> {
+    fn has_terminated(&self, thread_id: ThreadId) -> bool {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.has_terminated(thread_id))
+        this.machine.threads.has_terminated(thread_id)
     }
 
     #[inline]
-    fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn enable_thread(&mut self, thread_id: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.enable_thread(thread_id);
-        Ok(())
     }
 
     #[inline]
@@ -627,66 +635,73 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameDa
     }
 
     #[inline]
-    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) -> InterpResult<'tcx, ()> {
+    fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.set_thread_name(new_thread_name))
+        if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
+            this.memory.extra.data_race.thread_set_name(string);
+        }
+        this.machine.threads.set_thread_name(new_thread_name);
     }
 
     #[inline]
-    fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]>
+    fn get_active_thread_name<'c>(&'c self) -> &'c [u8]
     where
         'mir: 'c,
     {
         let this = self.eval_context_ref();
-        Ok(this.machine.threads.get_thread_name())
+        this.machine.threads.get_thread_name()
     }
 
     #[inline]
-    fn block_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn block_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.block_thread(thread))
+        this.machine.threads.block_thread(thread);
     }
 
     #[inline]
-    fn unblock_thread(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unblock_thread(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        Ok(this.machine.threads.unblock_thread(thread))
+        this.machine.threads.unblock_thread(thread);
     }
 
     #[inline]
-    fn yield_active_thread(&mut self) -> InterpResult<'tcx> {
+    fn yield_active_thread(&mut self) {
         let this = self.eval_context_mut();
         this.machine.threads.yield_active_thread();
-        Ok(())
     }
 
     #[inline]
-    fn register_callback(
+    fn register_timeout_callback(
         &mut self,
         thread: ThreadId,
-        call_time: Instant,
-        callback: EventCallback<'mir, 'tcx>,
-    ) -> InterpResult<'tcx> {
+        call_time: Time,
+        callback: TimeoutCallback<'mir, 'tcx>,
+    ) {
         let this = self.eval_context_mut();
-        this.machine.threads.register_callback(thread, call_time, callback);
-        Ok(())
+        this.machine.threads.register_timeout_callback(thread, call_time, callback);
     }
 
     #[inline]
-    fn unregister_callback_if_exists(&mut self, thread: ThreadId) -> InterpResult<'tcx> {
+    fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
         let this = self.eval_context_mut();
-        this.machine.threads.unregister_callback_if_exists(thread);
-        Ok(())
+        this.machine.threads.unregister_timeout_callback_if_exists(thread);
     }
 
-    /// Execute the callback on the callback's thread.
+    /// Execute a timeout callback on the callback's thread.
     #[inline]
-    fn run_scheduler_callback(&mut self) -> InterpResult<'tcx> {
+    fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let (thread, callback) = this.machine.threads.get_callback().expect("no callback found");
-        let old_thread = this.set_active_thread(thread)?;
+        let (thread, callback) =
+            this.machine.threads.get_ready_callback().expect("no callback found");
+        // This back-and-forth with `set_active_thread` is here because of two
+        // design decisions:
+        // 1. Make the caller, not the callback, responsible for changing
+        //    the active thread.
+        // 2. Make the scheduler the only place that can change the active
+        //    thread.
+        let old_thread = this.set_active_thread(thread);
         callback(this)?;
-        this.set_active_thread(old_thread)?;
+        this.set_active_thread(old_thread);
         Ok(())
     }
 
@@ -694,6 +709,22 @@ fn run_scheduler_callback(&mut self) -> InterpResult<'tcx> {
     #[inline]
     fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         let this = self.eval_context_mut();
-        this.machine.threads.schedule()
+        let data_race = &*this.memory.extra.data_race;
+        this.machine.threads.schedule(data_race)
+    }
+
+    /// Handles thread termination of the active thread: wakes up threads joining on this one,
+    /// and deallocates its thread-local statics.
+    ///
+    /// This is called from `tls.rs` after handling the TLS dtors.
+    #[inline]
+    fn thread_terminated(&mut self) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        let data_race = &*this.memory.extra.data_race;
+        for alloc_id in this.machine.threads.thread_terminated(data_race) {
+            let ptr = this.memory.global_base_pointer(alloc_id.into())?;
+            this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
+        }
+        Ok(())
     }
 }
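
For illustration only, a shim that waits with a timeout might use the interface introduced in this patch roughly as follows. This is a minimal sketch: `this` is assumed to be the Miri evaluation context inside a shim, `duration` is an assumed timeout value, and error handling and the actual condvar bookkeeping are omitted.

    let active_thread = this.get_active_thread();
    // Block the calling thread; it stays blocked until the callback (or a signal) unblocks it.
    this.block_thread(active_thread);
    this.register_timeout_callback(
        active_thread,
        Time::Monotonic(Instant::now() + duration),
        Box::new(move |ecx| {
            // The timeout elapsed without a signal: wake the thread so the shim
            // can report the timeout to the interpreted program.
            ecx.unblock_thread(active_thread);
            Ok(())
        }),
    );
    // A signalling thread would instead call `unregister_timeout_callback_if_exists(active_thread)`
    // and unblock the thread itself.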