only check timeouts when a thread yields

diff --git a/src/thread.rs b/src/thread.rs
index 42a4dbded58f4f33ea6f7d82b9373dcc70fd1d44..0f373e4cd31eb15cd25e38b1de365fce152776a7 100644
@@ -3,6 +3,7 @@
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
 use std::convert::TryFrom;
+use std::rc::Rc;
 use std::num::TryFromIntError;
 use std::time::{Duration, Instant, SystemTime};
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
 use rustc_index::vec::{Idx, IndexVec};
-use rustc_middle::{
-    middle::codegen_fn_attrs::CodegenFnAttrFlags,
-    mir,
-    ty::{self, Instance},
-};
 
 use crate::sync::SynchronizationState;
 use crate::*;
@@ -111,12 +107,23 @@ enum ThreadJoinStatus {
 /// A thread.
 pub struct Thread<'mir, 'tcx> {
     state: ThreadState,
+
     /// Name of the thread.
     thread_name: Option<Vec<u8>>,
+
     /// The virtual call stack.
     stack: Vec<Frame<'mir, 'tcx, Tag, FrameData<'tcx>>>,
+
     /// The join status.
     join_status: ThreadJoinStatus,
+
+    /// The temporary used for storing the argument of
+    /// the call to `miri_start_panic` (the panic payload) when unwinding.
+    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
+    pub(crate) panic_payload: Option<Scalar<Tag>>,
+
+    /// Last OS error location in memory. It is a 32-bit integer.
+    pub(crate) last_error: Option<MPlaceTy<'tcx, Tag>>,
 }
 
 impl<'mir, 'tcx> Thread<'mir, 'tcx> {
@@ -155,6 +162,8 @@ fn default() -> Self {
             thread_name: None,
             stack: Vec::new(),
             join_status: ThreadJoinStatus::Joinable,
+            panic_payload: None,
+            last_error: None,
         }
     }
 }
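
The new `last_error` field gives every thread its own `errno`-style slot. For context, this is the per-thread state behind patterns like the following in interpreted programs (a sketch; the actual shims that read and write the slot live elsewhere in Miri):

```rust
use std::io;

fn main() {
    // A failing syscall records a per-thread OS error...
    let _ = std::fs::File::open("/definitely/does/not/exist");
    // ...which `last_os_error` reads back (e.g. ENOENT on Unix).
    let err = io::Error::last_os_error();
    println!("errno = {:?}", err.raw_os_error());
}
```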
@@ -248,8 +257,8 @@ fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<AllocId> {
     fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) {
         self.thread_local_alloc_ids
             .borrow_mut()
-            .insert((def_id, self.active_thread), new_alloc_id)
-            .unwrap_none();
+            .try_insert((def_id, self.active_thread), new_alloc_id)
+            .unwrap();
     }
 
     /// Borrow the stack of the active thread.
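
Both `try_insert(..).unwrap()` call sites in this patch replace the (since removed) nightly method `Option::unwrap_none`; either form asserts that the key was not already present. A minimal sketch of the `try_insert` contract (it requires the nightly `map_try_insert` feature; the map contents here are illustrative):

```rust
#![feature(map_try_insert)] // nightly-only

use std::collections::HashMap;

fn main() {
    let mut ids: HashMap<&str, u32> = HashMap::new();
    // Vacant key: `try_insert` returns `Ok(&mut value)`.
    ids.try_insert("main", 0).unwrap();
    // Occupied key: it returns `Err(OccupiedError)` instead of overwriting,
    // so `.unwrap()` panics on a duplicate, enforcing the same invariant as
    // the old `insert(..).unwrap_none()`.
    assert!(ids.try_insert("main", 1).is_err());
}
```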
@@ -319,7 +328,7 @@ fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> {
     }
 
     /// Mark that the active thread tries to join the thread with `joined_thread_id`.
-    fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
+    fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
             throw_ub_format!("trying to join a detached or already joined thread");
         }
@@ -343,6 +352,11 @@ fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
                 self.active_thread,
                 joined_thread_id
             );
+        } else {
+            // The thread has already terminated; mark the join happens-before edge.
+            if let Some(data_race) = data_race {
+                data_race.thread_joined(self.active_thread, joined_thread_id);
+            }
         }
         Ok(())
     }
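
`thread_joined` records a happens-before edge from the joined thread's termination to the join point. As a rough illustration (this is not Miri's actual data-race detector), such an edge amounts to the joiner's vector clock absorbing the joined thread's final clock:

```rust
/// Simplified fixed-size vector clock: one slot per thread.
type VClock = [u64; 3];

/// Joining absorbs the terminated thread's clock, so every event it performed
/// is ordered before everything the joining thread does afterwards.
fn thread_joined(joiner: &mut VClock, joined: &VClock) {
    for (j, &t) in joiner.iter_mut().zip(joined) {
        *j = (*j).max(t);
    }
}

fn main() {
    let mut joiner = [5, 0, 1];
    let joined = [2, 7, 0];
    thread_joined(&mut joiner, &joined);
    assert_eq!(joiner, [5, 7, 1]);
}
```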
@@ -390,8 +404,8 @@ fn register_timeout_callback(
         callback: TimeoutCallback<'mir, 'tcx>,
     ) {
         self.timeout_callbacks
-            .insert(thread, TimeoutCallbackInfo { call_time, callback })
-            .unwrap_none();
+            .try_insert(thread, TimeoutCallbackInfo { call_time, callback })
+            .unwrap();
     }
 
     /// Unregister the callback for the `thread`.
@@ -415,26 +429,55 @@ fn get_ready_callback(&mut self) -> Option<(ThreadId, TimeoutCallback<'mir, 'tcx
         None
     }
 
+    /// Wakes up threads joining on the active one and removes the active thread's
+    /// thread-local statics from the map. The `AllocId`s that can now be freed are
+    /// returned; the caller is responsible for actually deallocating them.
+    fn thread_terminated(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> Vec<AllocId> {
+        let mut free_tls_statics = Vec::new();
+        {
+            let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
+            thread_local_statics.retain(|&(_def_id, thread), &mut alloc_id| {
+                if thread != self.active_thread {
+                    // Keep this static around.
+                    return true;
+                }
+                // Delete this static from the map and from memory.
+                // We cannot free directly here as we cannot use `?` in this context.
+                free_tls_statics.push(alloc_id);
+                false
+            });
+        }
+        // Mark the thread as terminated in the data-race detector.
+        if let Some(data_race) = data_race {
+            data_race.thread_terminated();
+        }
+        // Check if we need to unblock any threads.
+        for (i, thread) in self.threads.iter_enumerated_mut() {
+            if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
+                // The thread has terminated; mark the happens-before edge to the joining thread.
+                if let Some(data_race) = data_race {
+                    data_race.thread_joined(i, self.active_thread);
+                }
+                trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
+                thread.state = ThreadState::Enabled;
+            }
+        }
+        free_tls_statics
+    }
+
     /// Decide which action to take next and on which thread.
     ///
     /// The currently implemented scheduling policy is the one that is commonly
     /// used in stateless model checkers such as Loom: run the active thread as
     /// long as we can and switch only when we have to (the active thread was
     /// blocked, terminated, or has explicitly asked to be preempted).
-    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
+    fn schedule(&mut self, data_race: &Option<Rc<data_race::GlobalState>>) -> InterpResult<'tcx, SchedulingAction> {
         // Check whether the thread has **just** terminated (`check_terminated`
         // checks whether the thread has popped all its stack and if yes, sets
         // the thread state to terminated).
         if self.threads[self.active_thread].check_terminated() {
-            // Check if we need to unblock any threads.
-            for (i, thread) in self.threads.iter_enumerated_mut() {
-                if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
-                    trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
-                    thread.state = ThreadState::Enabled;
-                }
-            }
             return Ok(SchedulingAction::ExecuteDtors);
         }
+        // If we get here again and the thread is *still* terminated, there are no more dtors to run.
         if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
             // The main thread terminated; stop the program.
             if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) {
@@ -448,31 +491,33 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
             }
             return Ok(SchedulingAction::Stop);
         }
-        // At least for `pthread_cond_timedwait` we need to report timeout when
-        // the function is called already after the specified time even if a
-        // signal is received before the thread gets scheduled. Therefore, we
-        // need to schedule all timeout callbacks before we continue regular
-        // execution.
-        //
-        // Documentation:
-        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html#
-        let potential_sleep_time =
-            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
-        if potential_sleep_time == Some(Duration::new(0, 0)) {
-            return Ok(SchedulingAction::ExecuteTimeoutCallback);
-        }
-        // No callbacks scheduled, pick a regular thread to execute.
+        // This thread and the program can keep going.
         if self.threads[self.active_thread].state == ThreadState::Enabled
             && !self.yield_active_thread
         {
             // The currently active thread is still enabled, just continue with it.
             return Ok(SchedulingAction::ExecuteStep);
         }
+        // The active thread yielded. Let's see if there are any timeouts to take care of. We do
+        // this *before* running any other thread, to ensure that timeouts "in the past" fire before
+        // any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
+        // error is returned if [...] the absolute time specified by abstime has already been passed
+        // at the time of the call".
+        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
+        let potential_sleep_time =
+            self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time()).min();
+        if potential_sleep_time == Some(Duration::new(0, 0)) {
+            return Ok(SchedulingAction::ExecuteTimeoutCallback);
+        }
+        // No timeout callback is ready to fire, so we continue with regular execution.
         // We need to pick a new thread for execution.
         for (id, thread) in self.threads.iter_enumerated() {
             if thread.state == ThreadState::Enabled {
                 if !self.yield_active_thread || id != self.active_thread {
                     self.active_thread = id;
+                    if let Some(data_race) = data_race {
+                        data_race.thread_set_active(self.active_thread);
+                    }
                     break;
                 }
             }
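
Per the commit subject, the zero-wait check now runs only once the active thread has yielded or become blocked. A minimal sketch of why saturating the remaining wait at zero makes `min()` detect deadlines that have already passed (`TimeoutInfo` here is hypothetical; Miri's real `call_time` supports both monotonic and wall-clock deadlines):

```rust
use std::time::{Duration, Instant};

struct TimeoutInfo {
    deadline: Instant,
}

impl TimeoutInfo {
    /// Remaining wait time, saturating at zero once the deadline has passed.
    fn get_wait_time(&self) -> Duration {
        self.deadline.checked_duration_since(Instant::now()).unwrap_or(Duration::new(0, 0))
    }
}

fn main() {
    let callbacks = vec![
        TimeoutInfo { deadline: Instant::now() },                          // already expired
        TimeoutInfo { deadline: Instant::now() + Duration::from_secs(1) }, // still pending
    ];
    // `Some(zero)` means a callback must fire before any thread may run.
    let potential_sleep_time = callbacks.iter().map(|info| info.get_wait_time()).min();
    assert_eq!(potential_sleep_time, Some(Duration::new(0, 0)));
}
```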
@@ -499,48 +544,10 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
 // Public interface to thread management.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
-    /// A workaround for thread-local statics until
-    /// https://github.com/rust-lang/rust/issues/70685 is fixed: change the
-    /// thread-local allocation id with a freshly generated allocation id for
-    /// the currently active thread.
-    fn remap_thread_local_alloc_ids(
-        &self,
-        val: &mut mir::interpret::ConstValue<'tcx>,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_ref();
-        match *val {
-            mir::interpret::ConstValue::Scalar(Scalar::Ptr(ref mut ptr)) => {
-                let alloc_id = ptr.alloc_id;
-                let alloc = this.tcx.get_global_alloc(alloc_id);
-                let tcx = this.tcx;
-                let is_thread_local = |def_id| {
-                    tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
-                };
-                match alloc {
-                    Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => {
-                        ptr.alloc_id = this.get_or_create_thread_local_alloc_id(def_id)?;
-                    }
-                    _ => {}
-                }
-            }
-            _ => {
-                // FIXME: Handling only `Scalar` seems to work for now, but at
-                // least in principle thread-locals could be in any constant, so
-                // we should also consider other cases. However, once
-                // https://github.com/rust-lang/rust/issues/70685 gets fixed,
-                // this code will have to be rewritten anyway.
-            }
-        }
-        Ok(())
-    }
-
     /// Get a thread-specific allocation id for the given thread-local static.
     /// If needed, allocate a new one.
-    ///
-    /// FIXME: This method should be replaced as soon as
-    /// https://github.com/rust-lang/rust/issues/70685 gets fixed.
-    fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
-        let this = self.eval_context_ref();
+    fn get_or_create_thread_local_alloc_id(&mut self, def_id: DefId) -> InterpResult<'tcx, AllocId> {
+        let this = self.eval_context_mut();
         let tcx = this.tcx;
         if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) {
             // We already have a thread-specific allocation id for this
@@ -549,35 +556,13 @@ fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tc
         } else {
             // We need to allocate a thread-specific allocation id for this
             // thread-local static.
-            //
-            // At first, we invoke the `const_eval_raw` query and extract the
-            // allocation from it. Unfortunately, we have to duplicate the code
-            // from `Memory::get_global_alloc` that does this.
-            //
-            // Then we store the retrieved allocation back into the `alloc_map`
-            // to get a fresh allocation id, which we can use as a
-            // thread-specific allocation id for the thread-local static.
+            // First, we compute the initial value for this static.
             if tcx.is_foreign_item(def_id) {
                 throw_unsup_format!("foreign thread-local statics are not supported");
             }
-            // Invoke the `const_eval_raw` query.
-            let instance = Instance::mono(tcx.tcx, def_id);
-            let gid = GlobalId { instance, promoted: None };
-            let raw_const =
-                tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| {
-                    // no need to report anything, the const_eval call takes care of that
-                    // for statics
-                    assert!(tcx.is_static(def_id));
-                    err
-                })?;
-            let id = raw_const.alloc_id;
-            // Extract the allocation from the query result.
-            let allocation = tcx.global_alloc(id).unwrap_memory();
-            // Create a new allocation id for the same allocation in this hacky
-            // way. Internally, `alloc_map` deduplicates allocations, but this
-            // is fine because Miri will make a copy before a first mutable
-            // access.
-            let new_alloc_id = tcx.create_memory_alloc(allocation);
+            let allocation = tcx.eval_static_initializer(def_id)?;
+            // Create a fresh allocation with this content.
+            let new_alloc_id = this.memory.allocate_with(allocation.clone(), MiriMemoryKind::Tls.into()).alloc_id;
             this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id);
             Ok(new_alloc_id)
         }
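
For reference, the observable behavior this per-thread allocation implements: every thread starts from a fresh copy of the static's initializer, as in this plain-Rust example:

```rust
use std::cell::Cell;
use std::thread;

thread_local! {
    static COUNTER: Cell<u32> = Cell::new(0);
}

fn main() {
    COUNTER.with(|c| c.set(5));
    thread::spawn(|| {
        // The spawned thread sees its own copy, freshly initialized from the
        // static's initializer, not the main thread's mutated value.
        COUNTER.with(|c| assert_eq!(c.get(), 0));
    })
    .join()
    .unwrap();
}
```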
@@ -586,7 +571,11 @@ fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tc
     #[inline]
     fn create_thread(&mut self) -> ThreadId {
         let this = self.eval_context_mut();
-        this.machine.threads.create_thread()
+        let id = this.machine.threads.create_thread();
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.thread_created(id);
+        }
+        id
     }
 
     #[inline]
@@ -598,12 +587,17 @@ fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.machine.threads.join_thread(joined_thread_id)
+        let data_race = &this.memory.extra.data_race;
+        this.machine.threads.join_thread(joined_thread_id, data_race)
     }
 
     #[inline]
     fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
         let this = self.eval_context_mut();
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.thread_set_active(thread_id);
+        }
         this.machine.threads.set_active_thread_id(thread_id)
     }
 
@@ -613,6 +607,18 @@ fn get_active_thread(&self) -> ThreadId {
         this.machine.threads.get_active_thread_id()
     }
 
+    #[inline]
+    fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
+        let this = self.eval_context_mut();
+        this.machine.threads.active_thread_mut()
+    }
+
+    #[inline]
+    fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
+        let this = self.eval_context_ref();
+        this.machine.threads.active_thread_ref()
+    }
+
     #[inline]
     fn get_total_thread_count(&self) -> usize {
         let this = self.eval_context_ref();
@@ -646,6 +652,13 @@ fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'mir, 'tcx, Tag, FrameDa
     #[inline]
     fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
         let this = self.eval_context_mut();
+        if let Some(data_race) = &this.memory.extra.data_race {
+            if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
+                data_race.thread_set_name(this.machine.threads.active_thread, string);
+            }
+        }
         this.machine.threads.set_thread_name(new_thread_name);
     }
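
Note that the name is forwarded to the data-race detector only when the bytes are valid UTF-8; otherwise the raw name is still stored, but the detector is not told about it. The relevant `String::from_utf8` behavior:

```rust
fn main() {
    // Valid UTF-8 converts losslessly, so the detector receives the name.
    assert_eq!(String::from_utf8(b"worker-1".to_vec()).unwrap(), "worker-1");
    // Invalid bytes make `from_utf8` fail; the `if let Ok(..)` above then
    // simply skips the `thread_set_name` call.
    assert!(String::from_utf8(vec![0x66, 0xff]).is_err());
}
```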
 
@@ -715,6 +728,22 @@ fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
     #[inline]
     fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         let this = self.eval_context_mut();
-        this.machine.threads.schedule()
+        let data_race = &this.memory.extra.data_race;
+        this.machine.threads.schedule(data_race)
+    }
+
+    /// Handles thread termination of the active thread: wakes up threads joining on this one,
+    /// and deallocates thread-local statics.
+    ///
+    /// This is called from `tls.rs` after handling the TLS dtors.
+    #[inline]
+    fn thread_terminated(&mut self) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        let data_race = &this.memory.extra.data_race;
+        for alloc_id in this.machine.threads.thread_terminated(data_race) {
+            let ptr = this.memory.global_base_pointer(alloc_id.into())?;
+            this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
+        }
+        Ok(())
     }
 }