git.lizzy.rs Git - rust.git/commitdiff
move interpreter loop into thread manager; they are pretty tightly coupled anyway
author     Ralf Jung <post@ralfj.de>
date       Sun, 27 Nov 2022 14:19:00 +0000 (15:19 +0100)
committer  Ralf Jung <post@ralfj.de>
date       Mon, 28 Nov 2022 07:53:14 +0000 (08:53 +0100)
src/tools/miri/src/concurrency/thread.rs
src/tools/miri/src/eval.rs
src/tools/miri/src/lib.rs

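The net effect of the commit: the scheduler-driven interpreter loop that used to sit inline in `eval_entry` (eval.rs) now lives next to the scheduler as `run_threads` in concurrency/thread.rs, and `SchedulingAction` becomes private to that module. As rough orientation before the diff, here is a minimal, self-contained sketch of that loop shape; every name in it (`ToyMachine`, `step`, etc.) is invented for illustration and is not Miri's actual API:

```rust
use std::time::Duration;

enum SchedulingAction {
    ExecuteStep,
    ExecuteTimeoutCallback,
    Sleep(Duration),
}

struct ToyMachine {
    steps_left: u32,
}

impl ToyMachine {
    // Stand-in for `ThreadManager::schedule`: decide what to do next.
    fn schedule(&mut self) -> SchedulingAction {
        SchedulingAction::ExecuteStep
    }

    // Stand-in for `InterpCx::step`: returns false once there is nothing left to run.
    fn step(&mut self) -> bool {
        self.steps_left = self.steps_left.saturating_sub(1);
        self.steps_left > 0
    }

    // The interpreter loop lives next to the scheduler, mirroring the new `run_threads`.
    fn run_threads(&mut self) -> Result<(), String> {
        loop {
            match self.schedule() {
                SchedulingAction::ExecuteStep => {
                    if !self.step() {
                        // Stack is empty; the real code would run `on_stack_empty`
                        // and possibly terminate the active thread instead.
                        return Ok(());
                    }
                }
                SchedulingAction::ExecuteTimeoutCallback => {
                    // The real code would run a registered timeout callback here.
                }
                SchedulingAction::Sleep(duration) => std::thread::sleep(duration),
            }
        }
    }
}

fn main() {
    let mut machine = ToyMachine { steps_left: 3 };
    machine.run_threads().unwrap();
}
```

With the loop owned by the thread-management code, the evaluation entry point only needs a single `run_threads` call, which is what the eval.rs hunk below reduces to.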
diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs
index fde47ed9ff1ec7f8545e39ae27bd7827285ffdd5..900d24443cc9668b454dc038271ab0b9e2535c26 100644
@@ -21,7 +21,7 @@
 use crate::*;
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum SchedulingAction {
+enum SchedulingAction {
     /// Execute step on the active thread.
     ExecuteStep,
     /// Execute a timeout callback.
@@ -450,8 +450,7 @@ fn enable_thread(&mut self, thread_id: ThreadId) {
     }
 
     /// Get a mutable borrow of the currently active thread.
-    /// (Private for a bit of protection.)
-    fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
+    pub fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
         &mut self.threads[self.active_thread]
     }
 
@@ -718,6 +717,51 @@ fn schedule(&mut self, clock: &Clock) -> InterpResult<'tcx, SchedulingAction> {
     }
 }
 
+impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
+trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
+    /// Execute a timeout callback on the callback's thread.
+    #[inline]
+    fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        let (thread, callback) = if let Some((thread, callback)) =
+            this.machine.threads.get_ready_callback(&this.machine.clock)
+        {
+            (thread, callback)
+        } else {
+            // get_ready_callback can return None if the computer's clock
+            // was shifted after calling the scheduler and before the call
+            // to get_ready_callback (see issue
+            // https://github.com/rust-lang/miri/issues/1763). In this case,
+            // just do nothing, which effectively just returns to the
+            // scheduler.
+            return Ok(());
+        };
+        // This back-and-forth with `set_active_thread` is here because of two
+        // design decisions:
+        // 1. Make the caller and not the callback responsible for changing
+        //    thread.
+        // 2. Make the scheduler the only place that can change the active
+        //    thread.
+        let old_thread = this.set_active_thread(thread);
+        callback.call(this)?;
+        this.set_active_thread(old_thread);
+        Ok(())
+    }
+
+    #[inline]
+    fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
+        let this = self.eval_context_mut();
+        let mut callback = this
+            .active_thread_mut()
+            .on_stack_empty
+            .take()
+            .expect("`on_stack_empty` not set up, or already running");
+        let res = callback(this)?;
+        this.active_thread_mut().on_stack_empty = Some(callback);
+        Ok(res)
+    }
+}
+
 // Public interface to thread management.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
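The save/switch/restore dance in `run_timeout_callback` above keeps the scheduler-side code as the only place that changes the active thread: the callback simply runs on whatever thread it was registered for, and the previously active thread is restored afterwards. A standalone sketch of that pattern with invented names (this is not Miri's `ThreadManager` API):

```rust
// Toy model of "only the scheduler changes the active thread".
struct Threads {
    active: usize,
}

impl Threads {
    // Returns the previously active thread, like Miri's `set_active_thread`.
    fn set_active_thread(&mut self, id: usize) -> usize {
        std::mem::replace(&mut self.active, id)
    }

    // Switch to the callback's thread, run it, then switch back.
    fn run_callback_on(&mut self, thread: usize, callback: impl FnOnce(&mut Threads)) {
        let old = self.set_active_thread(thread);
        callback(self);
        self.set_active_thread(old);
    }
}

fn main() {
    let mut threads = Threads { active: 0 };
    threads.run_callback_on(2, |t| assert_eq!(t.active, 2));
    // The previously active thread is restored once the callback is done.
    assert_eq!(threads.active, 0);
}
```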
@@ -961,53 +1005,29 @@ fn unregister_timeout_callback_if_exists(&mut self, thread: ThreadId) {
         this.machine.threads.unregister_timeout_callback_if_exists(thread);
     }
 
-    /// Execute a timeout callback on the callback's thread.
-    #[inline]
-    fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
+    /// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
+    /// termination).
+    fn run_threads(&mut self) -> InterpResult<'tcx, !> {
         let this = self.eval_context_mut();
-        let (thread, callback) = if let Some((thread, callback)) =
-            this.machine.threads.get_ready_callback(&this.machine.clock)
-        {
-            (thread, callback)
-        } else {
-            // get_ready_callback can return None if the computer's clock
-            // was shifted after calling the scheduler and before the call
-            // to get_ready_callback (see issue
-            // https://github.com/rust-lang/miri/issues/1763). In this case,
-            // just do nothing, which effectively just returns to the
-            // scheduler.
-            return Ok(());
-        };
-        // This back-and-forth with `set_active_thread` is here because of two
-        // design decisions:
-        // 1. Make the caller and not the callback responsible for changing
-        //    thread.
-        // 2. Make the scheduler the only place that can change the active
-        //    thread.
-        let old_thread = this.set_active_thread(thread);
-        callback.call(this)?;
-        this.set_active_thread(old_thread);
-        Ok(())
-    }
-
-    #[inline]
-    fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
-        let this = self.eval_context_mut();
-        let mut callback = this
-            .active_thread_mut()
-            .on_stack_empty
-            .take()
-            .expect("`on_stack_empty` not set up, or already running");
-        let res = callback(this)?;
-        this.active_thread_mut().on_stack_empty = Some(callback);
-        Ok(res)
-    }
-
-    /// Decide which action to take next and on which thread.
-    #[inline]
-    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
-        let this = self.eval_context_mut();
-        this.machine.threads.schedule(&this.machine.clock)
+        loop {
+            match this.machine.threads.schedule(&this.machine.clock)? {
+                SchedulingAction::ExecuteStep => {
+                    if !this.step()? {
+                        // See if this thread can do something else.
+                        match this.run_on_stack_empty()? {
+                            Poll::Pending => {} // keep going
+                            Poll::Ready(()) => this.terminate_active_thread()?,
+                        }
+                    }
+                }
+                SchedulingAction::ExecuteTimeoutCallback => {
+                    this.run_timeout_callback()?;
+                }
+                SchedulingAction::Sleep(duration) => {
+                    this.machine.clock.sleep(duration);
+                }
+            }
+        }
     }
 
     /// Handles thread termination of the active thread: wakes up threads joining on this one,
@@ -1015,7 +1035,7 @@ fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
     ///
     /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
     #[inline]
-    fn thread_terminated(&mut self) -> InterpResult<'tcx> {
+    fn terminate_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let thread = this.active_thread_mut();
         assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
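`run_on_stack_empty` (added above) uses a take/call/restore pattern: the boxed callback is moved out of the thread before it is called with the interpreter context, so the closure can freely mutate interpreter state without aliasing the thread borrow, and it is stored back afterwards. A standalone sketch of that pattern; the names are invented, and the real `StackEmptyCallback` additionally returns an `InterpResult`:

```rust
use std::task::Poll;

// Toy stand-in for Miri's `StackEmptyCallback`.
type ToyCallback = Box<dyn FnMut(&mut Interp) -> Poll<()>>;

struct Interp {
    on_stack_empty: Option<ToyCallback>,
    remaining_work: u32,
}

impl Interp {
    fn run_on_stack_empty(&mut self) -> Poll<()> {
        // Move the callback out so `self` is not borrowed while calling it...
        let mut callback = self
            .on_stack_empty
            .take()
            .expect("`on_stack_empty` not set up, or already running");
        let res = callback(self);
        // ...then put it back so it can run the next time the stack is empty.
        self.on_stack_empty = Some(callback);
        res
    }
}

fn main() {
    let mut interp = Interp {
        remaining_work: 2,
        on_stack_empty: Some(Box::new(|this: &mut Interp| {
            if this.remaining_work == 0 {
                Poll::Ready(())
            } else {
                this.remaining_work -= 1;
                Poll::Pending
            }
        })),
    };
    while interp.run_on_stack_empty() == Poll::Pending {}
}
```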
diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs
index f12c3518e831d93ed72a117f0b845b1f4423a253..c7f3c9577a8754db8dce549f808c7642ec2e4376 100644
@@ -229,9 +229,9 @@ fn on_main_stack_empty<'tcx>(
                     this.machine.layouts.isize,
                 );
                 let exit_code = this.read_scalar(&ret_place.into())?.to_machine_isize(this)?;
-                // Need to call `thread_terminated` ourselves since we are not going to
-                // return to the scheduler loop.
-                this.thread_terminated()?;
+                // Need to call this ourselves since we are not going to return to the scheduler
+                // loop, and we want the main thread TLS to not show up as memory leaks.
+                this.terminate_active_thread()?;
                 // Stop interpreter loop.
                 throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true });
             }
@@ -416,28 +416,8 @@ pub fn eval_entry<'tcx>(
     };
 
     // Perform the main execution.
-    let res: thread::Result<InterpResult<'_, !>> = panic::catch_unwind(AssertUnwindSafe(|| {
-        // Main loop. Goes on forever until an interrupt is triggered (represented as `InterpError`).
-        loop {
-            match ecx.schedule()? {
-                SchedulingAction::ExecuteStep => {
-                    if !ecx.step()? {
-                        // See if this thread can do something else.
-                        match ecx.run_on_stack_empty()? {
-                            Poll::Pending => {} // keep going
-                            Poll::Ready(()) => ecx.thread_terminated()?,
-                        }
-                    }
-                }
-                SchedulingAction::ExecuteTimeoutCallback => {
-                    ecx.run_timeout_callback()?;
-                }
-                SchedulingAction::Sleep(duration) => {
-                    ecx.machine.clock.sleep(duration);
-                }
-            }
-        }
-    }));
+    let res: thread::Result<InterpResult<'_, !>> =
+        panic::catch_unwind(AssertUnwindSafe(|| ecx.run_threads()));
     let res = res.unwrap_or_else(|panic_payload| {
         ecx.handle_ice();
         panic::resume_unwind(panic_payload)
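Note that `run_threads` has success type `!`: the loop never returns normally, so the only way out is through the error channel (including the `TerminationInfo::Exit` machine stop thrown from `on_main_stack_empty`), which is why the caller can wrap the single call in `catch_unwind` and only ever handle errors or panics. A small stable-Rust sketch of that shape, using `std::convert::Infallible` where Miri uses the nightly `!` type (all names invented):

```rust
use std::convert::Infallible;

// The "Ok" type is uninhabited, so this function can only exit via `Err`,
// just like `run_threads` only exits via an `InterpError`.
fn run_forever(mut budget: u32) -> Result<Infallible, String> {
    loop {
        if budget == 0 {
            // Program termination is reported through the error channel,
            // analogous to `throw_machine_stop!` in the real code.
            return Err("machine stopped".to_string());
        }
        budget -= 1;
    }
}

fn main() {
    match run_forever(3) {
        Ok(never) => match never {}, // unreachable: `Infallible` has no values
        Err(msg) => println!("stopped: {msg}"),
    }
}
```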
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index a759a81b7730476fc32ad3d92f9ad842a7ff8740..6f483cf2cc4800d84ef71ef4781de05e65437f66 100644
@@ -89,9 +89,7 @@
     data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
     init_once::{EvalContextExt as _, InitOnceId},
     sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
-    thread::{
-        EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager, Time,
-    },
+    thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
 };
 pub use crate::diagnostics::{
     report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,