diff --git a/src/thread.rs b/src/thread.rs
index 4fe44ef9d4a7d50832f5a7cfe3393f06cfcb4beb..2135806de3ed56853f233519f3ebcf28c00b3ffb 100644
--- a/src/thread.rs
+++ b/src/thread.rs
@@ -2,8 +2,6 @@
 
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
-use std::convert::TryFrom;
-use std::rc::Rc;
 use std::num::TryFromIntError;
 use std::time::{Duration, Instant, SystemTime};
 
@@ -12,7 +10,9 @@
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::Mutability;
 
+use crate::concurrency::data_race;
 use crate::sync::SynchronizationState;
 use crate::*;
 
@@ -59,7 +59,7 @@ fn index(self) -> usize {
 impl TryFrom<u64> for ThreadId {
     type Error = TryFromIntError;
     fn try_from(id: u64) -> Result<Self, Self::Error> {
-        u32::try_from(id).map(|id_u32| Self(id_u32))
+        u32::try_from(id).map(Self)
     }
 }
 
@@ -70,8 +70,8 @@ fn from(id: u32) -> Self {
 }
 
 impl ThreadId {
-    pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
-        Scalar::from_u32(u32::try_from(self.0).unwrap())
+    pub fn to_u32_scalar(&self) -> Scalar<Tag> {
+        Scalar::from_u32(self.0)
     }
 }
 
@@ -141,17 +141,19 @@ fn check_terminated(&mut self) -> bool {
 
     /// Get the name of the current thread, or `<unnamed>` if it was not set.
     fn thread_name(&self) -> &[u8] {
-        if let Some(ref thread_name) = self.thread_name {
-            thread_name
-        } else {
-            b"<unnamed>"
-        }
+        if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
     }
 }
 
 impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}({:?}, {:?})", String::from_utf8_lossy(self.thread_name()), self.state, self.join_status)
+        write!(
+            f,
+            "{}({:?}, {:?})",
+            String::from_utf8_lossy(self.thread_name()),
+            self.state,
+            self.join_status
+        )
     }
 }
 
@@ -217,7 +219,7 @@ pub struct ThreadManager<'mir, 'tcx> {
     pub(crate) sync: SynchronizationState,
     /// A mapping from a thread-local static to an allocation id of a thread
     /// specific allocation.
-    thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), AllocId>>,
+    thread_local_alloc_ids: RefCell<FxHashMap<(DefId, ThreadId), Pointer<Tag>>>,
     /// A flag that indicates that we should change the active thread.
     yield_active_thread: bool,
     /// Callbacks that are called once the specified time passes.
@@ -234,7 +236,7 @@ fn default() -> Self {
         threads.push(main_thread);
         Self {
             active_thread: ThreadId::new(0),
-            threads: threads,
+            threads,
             sync: SynchronizationState::default(),
             thread_local_alloc_ids: Default::default(),
             yield_active_thread: false,
@@ -246,23 +248,23 @@ fn default() -> Self {
 impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     /// Check if we have an allocation for the given thread local static for the
     /// active thread.
-    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<AllocId> {
+    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<Pointer<Tag>> {
         self.thread_local_alloc_ids.borrow().get(&(def_id, self.active_thread)).cloned()
     }
 
-    /// Set the allocation id as the allocation id of the given thread local
+    /// Set the pointer for the allocation of the given thread local
     /// static for the active thread.
     ///
     /// Panics if a thread local is initialized twice for the same thread.
-    fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) {
+    fn set_thread_local_alloc(&self, def_id: DefId, ptr: Pointer<Tag>) {
         self.thread_local_alloc_ids
             .borrow_mut()
-            .try_insert((def_id, self.active_thread), new_alloc_id)
+            .try_insert((def_id, self.active_thread), ptr)
             .unwrap();
     }
 
     /// Borrow the stack of the active thread.
-    fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
+    pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] {
         &self.threads[self.active_thread].stack
     }
 
@@ -301,6 +303,11 @@ fn has_terminated(&self, thread_id: ThreadId) -> bool {
         self.threads[thread_id].state == ThreadState::Terminated
     }
 
+    /// Have all threads terminated?
+    fn have_all_terminated(&self) -> bool {
+        self.threads.iter().all(|thread| thread.state == ThreadState::Terminated)
+    }
+
     /// Enable the thread for execution. The thread must be terminated.
     fn enable_thread(&mut self, thread_id: ThreadId) {
         assert!(self.has_terminated(thread_id));
@@ -328,7 +335,11 @@ fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> {
     }
 
     /// Mark that the active thread tries to join the thread with `joined_thread_id`.
-    fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx> {
+    fn join_thread(
+        &mut self,
+        joined_thread_id: ThreadId,
+        data_race: Option<&mut data_race::GlobalState>,
+    ) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
             throw_ub_format!("trying to join a detached or already joined thread");
         }
@@ -430,8 +441,11 @@ fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx
     }
 
     /// Wakes up threads joining on the active one and deallocates thread-local statics.
-    /// The `AllocId` that can now be freed is returned.
-    fn thread_terminated(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> Vec<AllocId> {
+    /// The `AllocId`s that can now be freed are returned.
+    fn thread_terminated(
+        &mut self,
+        mut data_race: Option<&mut data_race::GlobalState>,
+    ) -> Vec<Pointer<Tag>> {
         let mut free_tls_statics = Vec::new();
         {
             let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
@@ -443,25 +457,25 @@ fn thread_terminated(&mut self, data_race: &Option<Rc<data_race::GlobalState>>)
                 // Delete this static from the map and from memory.
                 // We cannot free directly here as we cannot use `?` in this context.
                 free_tls_statics.push(alloc_id);
-                return false;
+                false
             });
         }
         // Set the thread into a terminated state in the data-race detector
-        if let Some(data_race) = data_race {
+        if let Some(ref mut data_race) = data_race {
             data_race.thread_terminated();
         }
         // Check if we need to unblock any threads.
         for (i, thread) in self.threads.iter_enumerated_mut() {
             if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
                 // The thread has terminated, mark happens-before edge to joining thread
-                if let Some(data_race) = data_race {
+                if let Some(ref mut data_race) = data_race {
                     data_race.thread_joined(i, self.active_thread);
                 }
                 trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
                 thread.state = ThreadState::Enabled;
             }
         }
-        return free_tls_statics;
+        free_tls_statics
     }
 
     /// Decide which action to take next and on which thread.
@@ -470,56 +484,61 @@ fn thread_terminated(&mut self, data_race: &Option<Rc<data_race::GlobalState>>)
     /// used in stateless model checkers such as Loom: run the active thread as
     /// long as we can and switch only when we have to (the active thread was
     /// blocked, terminated, or has explicitly asked to be preempted).
-    fn schedule(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx, SchedulingAction> {
+    fn schedule(
+        &mut self,
+        data_race: &Option<data_race::GlobalState>,
+    ) -> InterpResult<'tcx, SchedulingAction> {
         // Check whether the thread has **just** terminated (`check_terminated`
         // checks whether the thread has popped all its stack and if yes, sets
         // the thread state to terminated).
         if self.threads[self.active_thread].check_terminated() {
             return Ok(SchedulingAction::ExecuteDtors);
         }
+        // If we get here again and the thread is *still* terminated, there are no more dtors to run.
         if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
             // The main thread terminated; stop the program.
-            if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) {
-                // FIXME: This check should be either configurable or just emit
-                // a warning. For example, it seems normal for a program to
-                // terminate without waiting for its detached threads to
-                // terminate. However, this case is not trivial to support
-                // because we also probably do not want to consider the memory
-                // owned by these threads as leaked.
-                throw_unsup_format!("the main thread terminated without waiting for other threads");
-            }
+            // We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior.
             return Ok(SchedulingAction::Stop);
         }
-        // At least for `pthread_cond_timedwait` we need to report timeout when
-        // the function is called already after the specified time even if a
-        // signal is received before the thread gets scheduled. Therefore, we
-        // need to schedule all timeout callbacks before we continue regular
-        // execution.
-        //
-        // Documentation:
-        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html#
-        let potential_sleep_time =
-            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
-        if potential_sleep_time == Some(Duration::new(0, 0)) {
-            return Ok(SchedulingAction::ExecuteTimeoutCallback);
-        }
-        // No callbacks scheduled, pick a regular thread to execute.
+        // This thread and the program can keep going.
         if self.threads[self.active_thread].state == ThreadState::Enabled
             && !self.yield_active_thread
         {
             // The currently active thread is still enabled, just continue with it.
             return Ok(SchedulingAction::ExecuteStep);
         }
-        // We need to pick a new thread for execution.
-        for (id, thread) in self.threads.iter_enumerated() {
+        // The active thread yielded. Let's see if there are any timeouts to take care of. We do
+        // this *before* running any other thread, to ensure that timeouts "in the past" fire before
+        // any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
+        // error is returned if [...] the absolute time specified by abstime has already been passed
+        // at the time of the call".
+        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
+        let potential_sleep_time =
+            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
+        if potential_sleep_time == Some(Duration::new(0, 0)) {
+            return Ok(SchedulingAction::ExecuteTimeoutCallback);
+        }
+        // No callbacks scheduled, pick a regular thread to execute.
+        // The active thread blocked or yielded. So we go search for another enabled thread.
+        // Crucially, we start searching at the current active thread ID, rather than at 0, since we
+        // want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2.
+        //
+        // `skip(N)` means we start iterating at thread N, so we skip 1 more to start just *after*
+        // the active thread. Then after that we look at `take(N)`, i.e., the threads *before* the
+        // active thread.
+        let threads = self
+            .threads
+            .iter_enumerated()
+            .skip(self.active_thread.index() + 1)
+            .chain(self.threads.iter_enumerated().take(self.active_thread.index()));
+        for (id, thread) in threads {
+            debug_assert_ne!(self.active_thread, id);
             if thread.state == ThreadState::Enabled {
-                if !self.yield_active_thread || id != self.active_thread {
-                    self.active_thread = id;
-                    if let Some(data_race) = data_race {
-                        data_race.thread_set_active(self.active_thread);
-                    }
-                    break;
+                self.active_thread = id;
+                if let Some(data_race) = data_race {
+                    data_race.thread_set_active(self.active_thread);
                 }
+                break;
             }
         }
         self.yield_active_thread = false;
@@ -546,13 +565,16 @@ impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mi
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     /// Get a thread-specific allocation id for the given thread-local static.
     /// If needed, allocate a new one.
-    fn get_or_create_thread_local_alloc_id(&mut self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
+    fn get_or_create_thread_local_alloc(
+        &mut self,
+        def_id: DefId,
+    ) -> InterpResult<'tcx, Pointer<Tag>> {
         let this = self.eval_context_mut();
         let tcx = this.tcx;
-        if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) {
+        if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
             // We already have a thread-specific allocation id for this
             // thread-local static.
-            Ok(new_alloc_id)
+            Ok(old_alloc)
         } else {
             // We need to allocate a thread-specific allocation id for this
             // thread-local static.
@@ -561,10 +583,13 @@ fn get_or_create_thread_local_alloc_id(&mut self, def_id: DefId) -> InterpResult
                 throw_unsup_format!("foreign thread-local statics are not supported");
             }
             let allocation = tcx.eval_static_initializer(def_id)?;
+            let mut allocation = allocation.inner().clone();
+            // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
+            allocation.mutability = Mutability::Mut;
             // Create a fresh allocation with this content.
-            let new_alloc_id = this.memory.allocate_with(allocation.clone(), MiriMemoryKind::Tls.into()).alloc_id;
-            this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id);
-            Ok(new_alloc_id)
+            let new_alloc = this.allocate_raw_ptr(allocation, MiriMemoryKind::Tls.into())?;
+            this.machine.threads.set_thread_local_alloc(def_id, new_alloc);
+            Ok(new_alloc)
         }
     }
 
@@ -572,7 +597,7 @@ fn get_or_create_thread_local_alloc_id(&mut self, def_id: DefId) -> InterpResult
     fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
         let id = this.machine.threads.create_thread();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.machine.data_race {
             data_race.thread_created(id);
         }
         id
@@ -587,15 +612,14 @@ fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let data_race = &this.memory.extra.data_race;
-        this.machine.threads.join_thread(joined_thread_id, data_race)?;
+        this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?;
         Ok(())
     }
 
     #[inline]
     fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &this.machine.data_race {
             data_race.thread_set_active(thread_id);
         }
         this.machine.threads.set_active_thread_id(thread_id)
@@ -631,6 +655,12 @@ fn has_terminated(&self, thread_id: ThreadId) -> bool {
         this.machine.threads.has_terminated(thread_id)
     }
 
+    #[inline]
+    fn have_all_terminated(&self) -> bool {
+        let this = self.eval_context_ref();
+        this.machine.threads.have_all_terminated()
+    }
+
     #[inline]
     fn enable_thread(&mut self, thread_id: ThreadId) {
         let this = self.eval_context_mut();
@@ -652,11 +682,9 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameDa
     #[inline]
     fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.machine.data_race {
             if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
-                data_race.thread_set_name(
-                    this.machine.threads.active_thread, string
-                );
+                data_race.thread_set_name(this.machine.threads.active_thread, string);
             }
         }
         this.machine.threads.set_thread_name(new_thread_name);
@@ -689,6 +717,16 @@ fn yield_active_thread(&mut self) {
         this.machine.threads.yield_active_thread();
     }
 
+    #[inline]
+    fn maybe_preempt_active_thread(&mut self) {
+        use rand::Rng as _;
+
+        let this = self.eval_context_mut();
+        if this.machine.rng.get_mut().gen_bool(this.machine.preemption_rate) {
+            this.yield_active_thread();
+        }
+    }
+
     #[inline]
     fn register_timeout_callback(
         &mut self,
@@ -711,7 +749,17 @@ fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
     fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (thread, callback) =
-            this.machine.threads.get_ready_callback().expect("no callback found");
+            if let Some((thread, callback)) = this.machine.threads.get_ready_callback() {
+                (thread, callback)
+            } else {
+                // get_ready_callback can return None if the computer's clock
+                // was shifted after calling the scheduler and before the call
+                // to get_ready_callback (see issue
+                // https://github.com/rust-lang/miri/issues/1763). In this case,
+                // just do nothing, which effectively just returns to the
+                // scheduler.
+                return Ok(());
+            };
         // This back-and-forth with `set_active_thread` is here because of two
         // design decisions:
         // 1. Make the caller and not the callback responsible for changing
@@ -728,7 +776,7 @@ fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
     #[inline]
     fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         let this = self.eval_context_mut();
-        let data_race = &this.memory.extra.data_race;
+        let data_race = &this.machine.data_race;
         this.machine.threads.schedule(data_race)
     }
 
@@ -739,10 +787,8 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
     #[inline]
     fn thread_terminated(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let data_race = &this.memory.extra.data_race;
-        for alloc_id in this.machine.threads.thread_terminated(data_race) {
-            let ptr = this.memory.global_base_pointer(alloc_id.into())?;
-            this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
+        for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
+            this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
         }
         Ok(())
     }
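
The most notable behavioral change above is the scheduler's thread selection: instead of always scanning from thread 0, `schedule()` now walks the thread list in round-robin order starting just after the active thread. Below is a minimal standalone sketch of that search pattern, assuming plain `usize` thread IDs and a simple `ThreadState` enum rather than Miri's actual `ThreadManager` types; it only illustrates the `skip`/`chain`/`take` idea from the diff, not the real implementation.

// Standalone sketch of the round-robin search from `schedule()` above.
// Names and types here are illustrative, not Miri's.

#[derive(Clone, Copy, PartialEq, Debug)]
enum ThreadState {
    Enabled,
    Blocked,
    Terminated,
}

/// Pick the next enabled thread, searching from the thread just *after*
/// `active` and wrapping around to the threads before it. Returns `None`
/// if no other thread is enabled.
fn pick_next(threads: &[ThreadState], active: usize) -> Option<usize> {
    // `skip(active + 1)` starts the walk right after the active thread;
    // `take(active)` then covers the threads *before* it (the wrap-around),
    // so the active thread itself is never re-selected here.
    threads
        .iter()
        .enumerate()
        .skip(active + 1)
        .chain(threads.iter().enumerate().take(active))
        .find(|(_, state)| **state == ThreadState::Enabled)
        .map(|(id, _)| id)
}

fn main() {
    use ThreadState::*;
    // With threads 0..=2 all enabled and thread 0 active, the old scan from 0
    // and the new round-robin scan both pick thread 1 ...
    assert_eq!(pick_next(&[Enabled, Enabled, Enabled], 0), Some(1));
    // ... but once thread 1 is active, the round-robin scan moves on to
    // thread 2 instead of bouncing back to thread 0.
    assert_eq!(pick_next(&[Enabled, Enabled, Enabled], 1), Some(2));
    // The search wraps around past blocked and terminated threads.
    assert_eq!(pick_next(&[Enabled, Blocked, Enabled, Terminated], 2), Some(0));
    println!("round-robin selection works as sketched");
}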