Auto merge of #2183 - RalfJung:better-provenance-control, r=RalfJung

diff --git a/src/shims/tls.rs b/src/shims/tls.rs
index 3339e3bee19990d2378759a9803fca3d842abdbf..6b4e9d4f753376cd7d1d274194acbc7e665680b7 100644
--- a/src/shims/tls.rs
+++ b/src/shims/tls.rs
@@ -1,14 +1,14 @@
 //! Implement thread-local storage.
 
-use std::collections::BTreeMap;
 use std::collections::btree_map::Entry as BTreeEntry;
 use std::collections::hash_map::Entry as HashMapEntry;
+use std::collections::BTreeMap;
 
 use log::trace;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_middle::ty;
-use rustc_target::abi::{Size, HasDataLayout};
+use rustc_target::abi::{HasDataLayout, Size};
 use rustc_target::spec::abi::Abi;
 
 use crate::*;
@@ -63,7 +63,11 @@ fn default() -> Self {
 impl<'tcx> TlsData<'tcx> {
     /// Generate a new TLS key with the given destructor.
     /// `max_size` determines the integer size the key has to fit in.
-    pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>, max_size: Size) -> InterpResult<'tcx, TlsKey> {
+    pub fn create_tls_key(
+        &mut self,
+        dtor: Option<ty::Instance<'tcx>>,
+        max_size: Size,
+    ) -> InterpResult<'tcx, TlsKey> {
         let new_key = self.next_key;
         self.next_key += 1;
         self.keys.try_insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap();
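
Background: per the doc comment, `max_size` bounds the key value so it can round-trip through the platform's key type (e.g. a 32-bit `pthread_key_t`). The actual fit check lives outside this hunk; a minimal sketch of the idea, with a hypothetical helper name and `u128` assumed for the key width, could look like:

```rust
/// Sketch only: does a freshly allocated key fit into `max_size_bits` bits?
/// `key_fits` is a hypothetical name, not part of Miri's API, and `u128`
/// is an assumption about the key's width.
fn key_fits(new_key: u128, max_size_bits: u32) -> bool {
    // A 128-bit (or wider) target always fits; otherwise compare to 2^bits.
    max_size_bits >= 128 || new_key < (1u128 << max_size_bits)
}
```
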
@@ -95,7 +99,7 @@ pub fn load_tls(
             Some(TlsEntry { data, .. }) => {
                 let value = data.get(&thread_id).copied();
                 trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
-                Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
+                Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx)))
             }
             None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
         }
@@ -105,19 +109,17 @@ pub fn store_tls(
         &mut self,
         key: TlsKey,
         thread_id: ThreadId,
-        new_data: Option<Scalar<Tag>>
+        new_data: Scalar<Tag>,
+        cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx> {
         match self.keys.get_mut(&key) {
             Some(TlsEntry { data, .. }) => {
-                match new_data {
-                    Some(scalar) => {
-                        trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, scalar);
-                        data.insert(thread_id, scalar);
-                    }
-                    None => {
-                        trace!("TLS key {} for thread {:?} removed", key, thread_id);
-                        data.remove(&thread_id);
-                    }
+                if new_data.to_machine_usize(cx)? != 0 {
+                    trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, new_data);
+                    data.insert(thread_id, new_data);
+                } else {
+                    trace!("TLS key {} for thread {:?} removed", key, thread_id);
+                    data.remove(&thread_id);
                 }
                 Ok(())
             }
@@ -133,19 +135,23 @@ pub fn store_tls(
     /// [`_tlv_atexit`
     /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389):
     ///
-    ///     // NOTE: this does not need locks because it only operates on current thread data
+    /// NOTE: this does not need locks because it only operates on current thread data
     pub fn set_macos_thread_dtor(
         &mut self,
         thread: ThreadId,
         dtor: ty::Instance<'tcx>,
-        data: Scalar<Tag>
+        data: Scalar<Tag>,
     ) -> InterpResult<'tcx> {
         if self.dtors_running.contains_key(&thread) {
             // UB, according to libstd docs.
-            throw_ub_format!("setting thread's local storage destructor while destructors are already running");
+            throw_ub_format!(
+                "setting thread's local storage destructor while destructors are already running"
+            );
         }
         if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
-            throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported");
+            throw_unsup_format!(
+                "setting more than one thread local storage destructor for the same thread is not supported"
+            );
         }
         Ok(())
     }
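
Background for the macOS path: `_tlv_atexit` registers a destructor at runtime, and the two error paths above are exactly this shim's invariants. A standalone sketch of just those invariants, with deliberately simplified types (not Miri's actual structures):

```rust
use std::collections::{HashMap, HashSet};

/// Simplified stand-ins: u32 for ThreadId, fn() for (Instance, Scalar).
struct MacosDtors {
    dtors_running: HashSet<u32>,
    registered: HashMap<u32, fn()>,
}

impl MacosDtors {
    fn set(&mut self, thread: u32, dtor: fn()) -> Result<(), &'static str> {
        if self.dtors_running.contains(&thread) {
            // Mirrors the throw_ub_format! above.
            return Err("dtor registered while dtors are already running");
        }
        if self.registered.insert(thread, dtor).is_some() {
            // Mirrors the throw_unsup_format! above.
            return Err("more than one dtor per thread is unsupported");
        }
        Ok(())
    }
}
```
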
@@ -181,9 +187,7 @@ fn fetch_tls_dtor(
             Some(key) => Excluded(key),
             None => Unbounded,
         };
-        for (&key, TlsEntry { data, dtor }) in
-            thread_local.range_mut((start, Unbounded))
-        {
+        for (&key, TlsEntry { data, dtor }) in thread_local.range_mut((start, Unbounded)) {
             match data.entry(thread_id) {
                 BTreeEntry::Occupied(entry) => {
                     if let Some(dtor) = dtor {
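
Background: the reflowed loop above is why `keys` is a `BTreeMap` (see the import reorder in the first hunk): ordered iteration lets `fetch_tls_dtor` resume strictly after the last key it handled. In isolation the pattern looks like this (illustrative, with `&str` payloads):

```rust
use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Unbounded};

/// First entry strictly after `key`, or the very first entry when `key`
/// is None -- the same resumption pattern as fetch_tls_dtor.
fn next_after<'a>(
    map: &'a BTreeMap<u128, &'a str>,
    key: Option<u128>,
) -> Option<(u128, &'a str)> {
    let start = match key {
        Some(key) => Excluded(key),
        None => Unbounded,
    };
    map.range((start, Unbounded)).next().map(|(&k, &v)| (k, v))
}
```
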
@@ -237,18 +241,25 @@ fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
         // (that would be basically https://github.com/rust-lang/miri/issues/450),
         // we specifically look up the static in libstd that we know is placed
         // in that section.
-        let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local_key", "p_thread_callback"])?;
-        let thread_callback = this.memory.get_fn(thread_callback.check_init()?)?.as_instance()?;
+        let thread_callback = this.eval_path_scalar(&[
+            "std",
+            "sys",
+            "windows",
+            "thread_local_key",
+            "p_thread_callback",
+        ])?;
+        let thread_callback =
+            this.get_ptr_fn(this.scalar_to_ptr(thread_callback)?)?.as_instance()?;
 
         // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
         let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?;
-        let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
+        let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
         this.call_function(
             thread_callback,
             Abi::System { unwind: false },
             &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
-            Some(&ret_place),
-            StackPopCleanup::None { cleanup: true },
+            &ret_place,
+            StackPopCleanup::Root { cleanup: true },
         )?;
 
         this.enable_thread(active_thread);
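
Background for this hunk: on Windows, libstd runs TLS destructors from a callback placed in the `.CRT$XLB` TLS section; since Miri does not model PE sections, it looks up libstd's `p_thread_callback` and invokes it directly with `DLL_THREAD_DETACH`. Restating the callback's shape from the signature comment above (the type aliases are illustrative stand-ins for libstd's `c` module):

```rust
use std::ffi::c_void;

type LPVOID = *mut c_void;
type DWORD = u32;

// Value from the Windows headers, shown only to make the call above
// concrete; Miri reads the constant from libstd rather than hardcoding it.
const DLL_THREAD_DETACH: DWORD = 3;

unsafe extern "system" fn p_thread_callback(_h: LPVOID, dw_reason: DWORD, _pv: LPVOID) {
    if dw_reason == DLL_THREAD_DETACH {
        // libstd's real callback walks its TLS destructor list here.
    }
}
```
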
@@ -265,13 +276,13 @@ fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
             trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
 
-            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
+            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
             this.call_function(
                 instance,
                 Abi::C { unwind: false },
                 &[data.into()],
-                Some(&ret_place),
-                StackPopCleanup::None { cleanup: true },
+                &ret_place,
+                StackPopCleanup::Root { cleanup: true },
             )?;
 
             // Enable the thread so that it steps through the destructor which
@@ -293,26 +304,28 @@ fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
 
         assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
         // Fetch next dtor after `key`.
-        let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
+        let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key;
         let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
             dtor @ Some(_) => dtor,
             // We ran each dtor once, start over from the beginning.
-            None => {
-                this.machine.tls.fetch_tls_dtor(None, active_thread)
-            }
+            None => this.machine.tls.fetch_tls_dtor(None, active_thread),
         };
         if let Some((instance, ptr, key)) = dtor {
-            this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = Some(key);
+            this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
+                Some(key);
             trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
-            assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!");
+            assert!(
+                ptr.to_machine_usize(this).unwrap() != 0,
+                "data can't be NULL when dtor is called!"
+            );
 
-            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
+            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
             this.call_function(
                 instance,
                 Abi::C { unwind: false },
                 &[ptr.into()],
-                Some(&ret_place),
-                StackPopCleanup::None { cleanup: true },
+                &ret_place,
+                StackPopCleanup::Root { cleanup: true },
             )?;
 
             this.enable_thread(active_thread);
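
Background: the `last_key` bookkeeping above implements POSIX's iterative destructor protocol: run each destructor that has a non-NULL value, and if any destructor stored fresh values, start another pass. A compact model of the loop this scheduler unrolls one step at a time (the closure stands in for `fetch_tls_dtor`; real POSIX implementations may cap the passes at PTHREAD_DESTRUCTOR_ITERATIONS):

```rust
/// `fetch(last)` returns the next key after `last` that still holds a
/// non-NULL value, like fetch_tls_dtor. This sketch, like the hunk above,
/// simply loops until a full pass finds nothing left.
fn run_dtors(mut fetch: impl FnMut(Option<u128>) -> Option<u128>) {
    let mut last_key = None;
    loop {
        let dtor = match fetch(last_key) {
            d @ Some(_) => d,
            None => fetch(None), // ran each dtor once; start a new pass
        };
        match dtor {
            Some(key) => {
                last_key = Some(key);
                // ... the dtor for `key` would run to completion here ...
            }
            None => break, // a full pass found nothing: all dtors done
        }
    }
}
```
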
@@ -326,7 +339,6 @@ fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
 
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
-
     /// Schedule an active thread's TLS destructor to run on the active thread.
     /// Note that this function does not run the destructors itself, it just
     /// schedules them one by one each time it is called and reenables the
@@ -335,10 +347,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     /// Note: we consistently run TLS destructors for all threads, including the
     /// main thread. However, it is not clear that we should run the TLS
     /// destructors for the main thread. See issue:
-    /// https://github.com/rust-lang/rust/issues/28129.
+    /// <https://github.com/rust-lang/rust/issues/28129>.
     fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let active_thread = this.get_active_thread();
+        trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);
 
         if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
             // This is the first time we got asked to schedule a destructor. The
@@ -349,7 +362,7 @@ fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
                 // relevant function, reenabling the thread, and going back to
                 // the scheduler.
                 this.schedule_windows_tls_dtors()?;
-                return Ok(())
+                return Ok(());
             }
         }
         // The remaining dtors make some progress each time around the scheduler loop,
@@ -361,12 +374,12 @@ fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
             // We have scheduled a MacOS dtor to run on the thread. Execute it
             // to completion and come back here. Scheduling a destructor
             // destroys it, so we will not enter this branch again.
-            return Ok(())
+            return Ok(());
         }
         if this.schedule_next_pthread_tls_dtor()? {
             // We have scheduled a pthread destructor and removed it from the
             // destructors list. Run it to completion and come back here.
-            return Ok(())
+            return Ok(());
         }
 
         // All dtors done!