Auto merge of #2426 - saethlin:unix-exec, r=RalfJung
diff --git a/src/sync.rs b/src/sync.rs
index 107ad5ace1723787912644331e5c6771128b4f02..5571bbd8f2dc577da6257c1e069a617212170edb 100644
@@ -1,8 +1,9 @@
 use std::collections::{hash_map::Entry, HashMap, VecDeque};
-use std::convert::TryFrom;
 use std::num::NonZeroU32;
 use std::ops::Not;
 
+use log::trace;
+
 use rustc_index::vec::{Idx, IndexVec};
 
 use crate::*;
@@ -41,7 +42,7 @@ fn index(self) -> usize {
         }
 
         impl $name {
-            pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
+            pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Provenance> {
                 Scalar::from_u32(self.0.get())
             }
         }
@@ -59,6 +60,12 @@ struct Mutex {
     lock_count: usize,
     /// The queue of threads waiting for this mutex.
     queue: VecDeque<ThreadId>,
+    /// Data race handle; this tracks the happens-before
+    /// relationship between each mutex access. It is
+    /// released to during unlock and acquired from during
+    /// locking, and therefore stores the clock of the last
+    /// thread to release this mutex.
+    data_race: VClock,
 }
 
 declare_id!(RwLockId);
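
The new `data_race` field models mutex happens-before with the usual release/acquire handshake: unlocking releases the owner's vector clock into the mutex, and a later lock joins (acquires) that stored clock into the new owner's clock. Below is a minimal standalone sketch of that handshake; `MiniClock` and its methods are hypothetical names for illustration, not Miri's `VClock` or `validate_lock_acquire`/`validate_lock_release` API.

```rust
// Hypothetical, simplified model of the mutex `data_race` clock.
// `MiniClock`, `release_to`, and `acquire_from` are illustrative names.
#[derive(Clone, Default)]
struct MiniClock(Vec<u32>); // one logical counter per thread

impl MiniClock {
    fn tick(&mut self, thread: usize) {
        if self.0.len() <= thread {
            self.0.resize(thread + 1, 0);
        }
        self.0[thread] += 1;
    }
    /// Unlock: the releasing thread stores its clock into the mutex clock.
    fn release_to(&self, mutex_clock: &mut MiniClock) {
        *mutex_clock = self.clone();
    }
    /// Lock: the acquiring thread joins the mutex clock into its own.
    fn acquire_from(&mut self, mutex_clock: &MiniClock) {
        if self.0.len() < mutex_clock.0.len() {
            self.0.resize(mutex_clock.0.len(), 0);
        }
        for (mine, theirs) in self.0.iter_mut().zip(&mutex_clock.0) {
            *mine = (*mine).max(*theirs);
        }
    }
}

fn main() {
    let mut mutex_clock = MiniClock::default();
    let (mut t0, mut t1) = (MiniClock::default(), MiniClock::default());

    t0.tick(0);                      // thread 0 performs some writes...
    t0.release_to(&mut mutex_clock); // ...then unlocks the mutex
    t1.acquire_from(&mutex_clock);   // thread 1 locks it afterwards
    assert!(t1.0[0] >= t0.0[0]);     // t0's work happens-before t1's accesses
}
```
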
@@ -75,6 +82,25 @@ struct RwLock {
     writer_queue: VecDeque<ThreadId>,
     /// The queue of reader threads waiting for this lock.
     reader_queue: VecDeque<ThreadId>,
+    /// Data race handle for writers; tracks the happens-before
+    /// ordering between each write access to a rwlock and is updated
+    /// after a sequence of concurrent readers to track the happens-
+    /// before ordering between the set of previous readers and
+    /// the current writer.
+    /// Contains the clock of the last thread to release a writer
+    /// lock or the joined clock of the set of last threads to release
+    /// shared reader locks.
+    data_race: VClock,
+    /// Data race handle for readers; this is temporary storage
+    /// for the combined happens-before ordering between all
+    /// concurrent readers and the next writer, and the value
+    /// is stored to the main data_race variable once all
+    /// readers are finished.
+    /// Has to be stored separately since reader lock acquires
+    /// must load the clock of the last write and must not
+    /// add happens-before orderings between shared reader
+    /// locks.
+    data_race_reader: VClock,
 }
 
 declare_id!(CondvarId);
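
The two `RwLock` clocks work together: readers acquire from `data_race` but release into `data_race_reader` by joining, so no happens-before edges are created between concurrent readers; once the last reader unlocks, the joined clock is copied into `data_race`, which the next writer then acquires from. A standalone sketch of that joining step follows, using plain arrays as stand-in vector clocks rather than Miri's `VClock`.

```rust
// Hypothetical sketch of the two RwLock clocks: readers join into a shared
// reader clock on unlock, and when the last reader leaves, that joined
// clock becomes the writer-facing clock.
type Clock = [u32; 3];

fn join(into: &mut Clock, from: &Clock) {
    for (a, b) in into.iter_mut().zip(from) {
        *a = (*a).max(*b);
    }
}

fn main() {
    let reader0: Clock = [4, 0, 0];
    let reader1: Clock = [0, 7, 0];
    let mut reader_clock: Clock = [0; 3]; // plays the role of `data_race_reader`

    // Each reader unlock joins its clock into the shared reader clock,
    // without creating happens-before edges between the readers themselves.
    join(&mut reader_clock, &reader0);
    join(&mut reader_clock, &reader1);

    // Last reader out: the joined reader clock is copied into the
    // writer-facing clock (`data_race`), so the next writer acquires
    // from *all* previous readers at once.
    let write_clock: Clock = reader_clock;
    let mut next_writer: Clock = [0, 0, 2];
    join(&mut next_writer, &write_clock);
    assert_eq!(next_writer, [4, 7, 2]);
}
```
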
@@ -92,6 +118,33 @@ struct CondvarWaiter {
 #[derive(Default, Debug)]
 struct Condvar {
     waiters: VecDeque<CondvarWaiter>,
+    /// Tracks the happens-before relationship
+    /// between a cond-var signal and a cond-var
+    /// wait during a non-spurious signal event.
+    /// Contains the clock of the last thread to
+    /// perform a cond-var signal.
+    data_race: VClock,
+}
+
+/// The futex state.
+#[derive(Default, Debug)]
+struct Futex {
+    waiters: VecDeque<FutexWaiter>,
+    /// Tracks the happens-before relationship
+    /// between a futex-wake and a futex-wait
+    /// during a non-spurious wake event.
+    /// Contains the clock of the last thread to
+    /// perform a futex-wake.
+    data_race: VClock,
+}
+
+/// A thread waiting on a futex.
+#[derive(Debug)]
+struct FutexWaiter {
+    /// The thread that is waiting on this futex.
+    thread: ThreadId,
+    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
+    bitset: u32,
 }
 
 /// The state of all synchronization variables.
@@ -100,6 +153,53 @@ pub(super) struct SynchronizationState {
     mutexes: IndexVec<MutexId, Mutex>,
     rwlocks: IndexVec<RwLockId, RwLock>,
     condvars: IndexVec<CondvarId, Condvar>,
+    futexes: HashMap<u64, Futex>,
+}
+
+// Private extension trait for local helper methods
+impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
+trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
+    /// Take a reader out of the queue waiting for the lock.
+    /// Returns `true` if some thread got the rwlock.
+    #[inline]
+    fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
+        let this = self.eval_context_mut();
+        if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
+            this.unblock_thread(reader);
+            this.rwlock_reader_lock(id, reader);
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Take the writer out of the queue waiting for the lock.
+    /// Returns `true` if some thread got the rwlock.
+    #[inline]
+    fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
+        let this = self.eval_context_mut();
+        if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
+            this.unblock_thread(writer);
+            this.rwlock_writer_lock(id, writer);
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Take a thread out of the queue waiting for the mutex, and lock
+    /// the mutex for it. Returns `true` if some thread has the mutex now.
+    #[inline]
+    fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
+        let this = self.eval_context_mut();
+        if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
+            this.unblock_thread(thread);
+            this.mutex_lock(id, thread);
+            true
+        } else {
+            false
+        }
+    }
 }
 
 // Public interface to synchronization primitives. Please note that in most
@@ -115,6 +215,24 @@ fn mutex_create(&mut self) -> MutexId {
         this.machine.threads.sync.mutexes.push(Default::default())
     }
 
+    #[inline]
+    /// Provides the closure with the next MutexId. Creates that mutex if the closure returns `None`;
+    /// otherwise returns the value from the closure.
+    fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
+    where
+        F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
+    {
+        let this = self.eval_context_mut();
+        let next_index = this.machine.threads.sync.mutexes.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.mutexes.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
+    }
+
     #[inline]
     /// Get the id of the thread that currently owns this lock.
     fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
@@ -124,8 +242,8 @@ fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
 
     #[inline]
     /// Check if locked.
-    fn mutex_is_locked(&mut self, id: MutexId) -> bool {
-        let this = self.eval_context_mut();
+    fn mutex_is_locked(&self, id: MutexId) -> bool {
+        let this = self.eval_context_ref();
         this.machine.threads.sync.mutexes[id].owner.is_some()
     }
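
The `mutex_get_or_create` helper added above passes the closure the id the next mutex would receive, so lazily-initialized synchronization objects can either report an id they already stored or let a fresh one be allocated at exactly that index. Below is a simplified standalone sketch of the same pattern, with a plain `Vec` and `usize` standing in for `IndexVec<MutexId, Mutex>` and no interpreter state; the `rwlock_get_or_create` and `condvar_get_or_create` helpers later in the diff follow the same shape.

```rust
// Hypothetical sketch of the get-or-create pattern: the closure is shown
// the index the next object would receive and may instead return an
// existing id (e.g. one it finds stored in the guest's lock memory).
#[derive(Default)]
struct MiniMutex;

fn get_or_create<F>(mutexes: &mut Vec<MiniMutex>, existing: F) -> usize
where
    F: FnOnce(&mut Vec<MiniMutex>, usize) -> Option<usize>,
{
    let next_index = mutexes.len();
    if let Some(old) = existing(mutexes, next_index) {
        old
    } else {
        mutexes.push(MiniMutex::default());
        next_index
    }
}

fn main() {
    let mut mutexes = Vec::new();
    // First call: nothing stored yet, so a new mutex is created at index 0.
    let id0 = get_or_create(&mut mutexes, |_, _next| None);
    // Second call: pretend the guest object already recorded id 0.
    let id1 = get_or_create(&mut mutexes, |_, _next| Some(0));
    assert_eq!((id0, id1), (0, 0));
    assert_eq!(mutexes.len(), 1);
}
```
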
 
@@ -143,27 +261,23 @@ fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
             mutex.owner = Some(thread);
         }
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
+        if let Some(data_race) = &this.machine.data_race {
+            data_race.validate_lock_acquire(&mutex.data_race, thread);
+        }
     }
 
-    /// Try unlocking by decreasing the lock count and returning the old owner
-    /// and the old lock count. If the lock count reaches 0, release the lock
-    /// and potentially give to a new owner. If the lock was not locked, return
-    /// `None`.
-    ///
-    /// Note: It is the caller's responsibility to check that the thread that
-    /// unlocked the lock actually is the same one, which owned it.
-    fn mutex_unlock(
-        &mut self,
-        id: MutexId,
-        expected_owner: ThreadId,
-    ) -> InterpResult<'tcx, Option<usize>> {
+    /// Try unlocking by decreasing the lock count and returning the old lock
+    /// count. If the lock count reaches 0, release the lock and potentially
+    /// give to a new owner. If the lock was not locked by `expected_owner`,
+    /// return `None`.
+    fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
         let this = self.eval_context_mut();
         let mutex = &mut this.machine.threads.sync.mutexes[id];
         if let Some(current_owner) = mutex.owner {
             // Mutex is locked.
             if current_owner != expected_owner {
                 // Only the owner can unlock the mutex.
-                return Ok(None);
+                return None;
             }
             let old_lock_count = mutex.lock_count;
             mutex.lock_count = old_lock_count
@@ -173,53 +287,76 @@ fn mutex_unlock(
                 mutex.owner = None;
                 // The mutex is completely unlocked. Try transferring ownership
                 // to another thread.
-                if let Some(new_owner) = this.mutex_dequeue(id) {
-                    this.mutex_lock(id, new_owner);
-                    this.unblock_thread(new_owner)?;
+                if let Some(data_race) = &this.machine.data_race {
+                    data_race.validate_lock_release(&mut mutex.data_race, current_owner);
                 }
+                this.mutex_dequeue_and_lock(id);
             }
-            Ok(Some(old_lock_count))
+            Some(old_lock_count)
         } else {
-            // Mutex is unlocked.
-            Ok(None)
+            // Mutex is not locked.
+            None
         }
     }
 
     #[inline]
-    /// Put the thread into the queue waiting for the lock.
-    fn mutex_enqueue(&mut self, id: MutexId, thread: ThreadId) {
+    /// Put the thread into the queue waiting for the mutex.
+    fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
         let this = self.eval_context_mut();
         assert!(this.mutex_is_locked(id), "queueing on unlocked mutex");
         this.machine.threads.sync.mutexes[id].queue.push_back(thread);
+        this.block_thread(thread);
     }
 
     #[inline]
-    /// Take a thread out of the queue waiting for the lock.
-    fn mutex_dequeue(&mut self, id: MutexId) -> Option<ThreadId> {
+    /// Create state for a new read write lock.
+    fn rwlock_create(&mut self) -> RwLockId {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.mutexes[id].queue.pop_front()
+        this.machine.threads.sync.rwlocks.push(Default::default())
     }
 
     #[inline]
-    /// Create state for a new read write lock.
-    fn rwlock_create(&mut self) -> RwLockId {
+    /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns `None`;
+    /// otherwise returns the value from the closure.
+    fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
+    where
+        F: FnOnce(
+            &mut MiriEvalContext<'mir, 'tcx>,
+            RwLockId,
+        ) -> InterpResult<'tcx, Option<RwLockId>>,
+    {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks.push(Default::default())
+        let next_index = this.machine.threads.sync.rwlocks.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
     }
 
     #[inline]
     /// Check if locked.
-    fn rwlock_is_locked(&mut self, id: RwLockId) -> bool {
-        let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer.is_some()
-            || this.machine.threads.sync.rwlocks[id].readers.is_empty().not()
+    fn rwlock_is_locked(&self, id: RwLockId) -> bool {
+        let this = self.eval_context_ref();
+        let rwlock = &this.machine.threads.sync.rwlocks[id];
+        trace!(
+            "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
+            id,
+            rwlock.writer,
+            rwlock.readers.len(),
+        );
+        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
     }
 
     #[inline]
     /// Check if write locked.
-    fn rwlock_is_write_locked(&mut self, id: RwLockId) -> bool {
-        let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer.is_some()
+    fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
+        let this = self.eval_context_ref();
+        let rwlock = &this.machine.threads.sync.rwlocks[id];
+        trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
+        rwlock.writer.is_some()
     }
 
     /// Read-lock the lock by adding the `reader` to the list of threads that own
@@ -227,88 +364,145 @@ fn rwlock_is_write_locked(&mut self, id: RwLockId) -> bool {
     fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
         let this = self.eval_context_mut();
         assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
-        let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0);
+        trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        let count = rwlock.readers.entry(reader).or_insert(0);
         *count = count.checked_add(1).expect("the reader counter overflowed");
+        if let Some(data_race) = &this.machine.data_race {
+            data_race.validate_lock_acquire(&rwlock.data_race, reader);
+        }
     }
 
-    /// Try read-unlock the lock for `reader`. Returns `true` if succeeded,
-    /// `false` if this `reader` did not hold the lock.
+    /// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
+    /// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
     fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
         let this = self.eval_context_mut();
-        match this.machine.threads.sync.rwlocks[id].readers.entry(reader) {
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        match rwlock.readers.entry(reader) {
             Entry::Occupied(mut entry) => {
                 let count = entry.get_mut();
                 assert!(*count > 0, "rwlock locked with count == 0");
                 *count -= 1;
                 if *count == 0 {
+                    trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
                     entry.remove();
+                } else {
+                    trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
                 }
-                true
             }
-            Entry::Vacant(_) => false,
+            Entry::Vacant(_) => return false, // we did not even own this lock
+        }
+        if let Some(data_race) = &this.machine.data_race {
+            data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
         }
+
+        // The thread was a reader. If the lock is not held any more, give it to a writer.
+        if this.rwlock_is_locked(id).not() {
+            // All the readers are finished, so set the writer data-race handle to the
+            //  union of all reader data-race handles, since the set of readers
+            //  happens-before any later writer.
+            let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+            rwlock.data_race.clone_from(&rwlock.data_race_reader);
+            this.rwlock_dequeue_and_lock_writer(id);
+        }
+        true
     }
 
     #[inline]
     /// Put the reader in the queue waiting for the lock and block it.
-    fn rwlock_enqueue_and_block_reader(
-        &mut self,
-        id: RwLockId,
-        reader: ThreadId,
-    ) -> InterpResult<'tcx> {
+    fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
         let this = self.eval_context_mut();
-        assert!(this.rwlock_is_write_locked(id), "queueing on not write locked lock");
+        assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
         this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
-        this.block_thread(reader)
-    }
-
-    #[inline]
-    /// Take a reader out the queue waiting for the lock.
-    fn rwlock_dequeue_reader(&mut self, id: RwLockId) -> Option<ThreadId> {
-        let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].reader_queue.pop_front()
+        this.block_thread(reader);
     }
 
     #[inline]
     /// Lock by setting the writer that owns the lock.
     fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
-        assert!(!this.rwlock_is_locked(id), "the lock is already locked");
-        this.machine.threads.sync.rwlocks[id].writer = Some(writer);
+        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
+        trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        rwlock.writer = Some(writer);
+        if let Some(data_race) = &this.machine.data_race {
+            data_race.validate_lock_acquire(&rwlock.data_race, writer);
+        }
     }
 
     #[inline]
     /// Try to unlock by removing the writer.
-    fn rwlock_writer_unlock(&mut self, id: RwLockId) -> Option<ThreadId> {
+    fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer.take()
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        if let Some(current_writer) = rwlock.writer {
+            if current_writer != expected_writer {
+                // Only the owner can unlock the rwlock.
+                return false;
+            }
+            rwlock.writer = None;
+            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
+            // Release memory to both reader and writer vector clocks
+            //  since this writer happens-before both the union of readers once they are finished
+            //  and the next writer
+            if let Some(data_race) = &this.machine.data_race {
+                data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
+                data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
+            }
+            // The thread was a writer.
+            //
+            // We are prioritizing writers here against the readers. As a
+            // result, not only can readers starve writers, but writers can
+            // also starve readers.
+            if this.rwlock_dequeue_and_lock_writer(id) {
+                // Someone got the write lock, nice.
+            } else {
+                // Give the lock to all readers.
+                while this.rwlock_dequeue_and_lock_reader(id) {
+                    // Rinse and repeat.
+                }
+            }
+            true
+        } else {
+            false
+        }
     }
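
The unlock path above encodes the wake-up policy spelled out in the comment: try to hand the freed rwlock to one queued writer first, and only if the writer queue is empty wake every queued reader. A simplified standalone model of just that policy follows; `MiniRwLock` and the `usize` thread ids are hypothetical stand-ins, with no blocking, clocks, or interpreter state.

```rust
// Hypothetical sketch of the wake-up policy in `rwlock_writer_unlock`.
use std::collections::VecDeque;

#[derive(Default)]
struct MiniRwLock {
    writer: Option<usize>,
    readers: Vec<usize>,
    writer_queue: VecDeque<usize>,
    reader_queue: VecDeque<usize>,
}

impl MiniRwLock {
    fn writer_unlock(&mut self) {
        self.writer = None;
        if let Some(w) = self.writer_queue.pop_front() {
            // Someone got the write lock, nice.
            self.writer = Some(w);
        } else {
            // Give the lock to all queued readers.
            while let Some(r) = self.reader_queue.pop_front() {
                self.readers.push(r);
            }
        }
    }
}

fn main() {
    // A writer (thread 7) and two readers (8, 9) are queued behind writer 1.
    let mut lock = MiniRwLock {
        writer: Some(1),
        writer_queue: VecDeque::from([7]),
        reader_queue: VecDeque::from([8, 9]),
        ..Default::default()
    };
    lock.writer_unlock();
    assert_eq!(lock.writer, Some(7)); // the queued writer wins...
    assert!(lock.readers.is_empty()); // ...and the readers keep waiting.

    lock.writer_unlock();                 // now no writer is queued
    assert_eq!(lock.readers, vec![8, 9]); // all readers get the lock
}
```
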
 
     #[inline]
     /// Put the writer in the queue waiting for the lock.
-    fn rwlock_enqueue_and_block_writer(
-        &mut self,
-        id: RwLockId,
-        writer: ThreadId,
-    ) -> InterpResult<'tcx> {
+    fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
-        assert!(this.rwlock_is_locked(id), "queueing on unlocked lock");
+        assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
         this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
-        this.block_thread(writer)
+        this.block_thread(writer);
     }
 
     #[inline]
-    /// Take the writer out the queue waiting for the lock.
-    fn rwlock_dequeue_writer(&mut self, id: RwLockId) -> Option<ThreadId> {
+    /// Create state for a new conditional variable.
+    fn condvar_create(&mut self) -> CondvarId {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.rwlocks[id].writer_queue.pop_front()
+        this.machine.threads.sync.condvars.push(Default::default())
     }
 
     #[inline]
-    /// Create state for a new conditional variable.
-    fn condvar_create(&mut self) -> CondvarId {
+    /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns `None`;
+    /// otherwise returns the value from the closure.
+    fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
+    where
+        F: FnOnce(
+            &mut MiriEvalContext<'mir, 'tcx>,
+            CondvarId,
+        ) -> InterpResult<'tcx, Option<CondvarId>>,
+    {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.condvars.push(Default::default())
+        let next_index = this.machine.threads.sync.condvars.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.condvars.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
     }
 
     #[inline]
@@ -330,10 +524,20 @@ fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, mutex: MutexId) {
     /// variable.
     fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.condvars[id]
-            .waiters
-            .pop_front()
-            .map(|waiter| (waiter.thread, waiter.mutex))
+        let current_thread = this.get_active_thread();
+        let condvar = &mut this.machine.threads.sync.condvars[id];
+        let data_race = &this.machine.data_race;
+
+        // Each condvar signal happens-before the end of the condvar wake
+        if let Some(data_race) = data_race {
+            data_race.validate_lock_release(&mut condvar.data_race, current_thread);
+        }
+        condvar.waiters.pop_front().map(|waiter| {
+            if let Some(data_race) = data_race {
+                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
+            }
+            (waiter.thread, waiter.mutex)
+        })
     }
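
`condvar_signal` releases the signalling thread's clock into the condvar before the queue is popped, and only a popped waiter performs the matching acquire, so a non-spurious wakeup establishes the signal-happens-before-wake edge described in the `Condvar` comment. A hedged standalone sketch of that ordering, again with fixed-size arrays as stand-in vector clocks rather than Miri's `VClock`:

```rust
// Hypothetical sketch of the clock handshake in `condvar_signal`.
use std::collections::VecDeque;

type Clock = [u32; 2];

fn join(into: &mut Clock, from: &Clock) {
    for (a, b) in into.iter_mut().zip(from) {
        *a = (*a).max(*b);
    }
}

fn signal(
    condvar_clock: &mut Clock,
    waiters: &mut VecDeque<Clock>,
    signaller: &Clock,
) -> Option<Clock> {
    // Release: happens on every signal, even if nobody is waiting.
    join(condvar_clock, signaller);
    // Acquire: only the woken waiter (if any) synchronizes with the signal.
    waiters.pop_front().map(|mut waiter| {
        join(&mut waiter, condvar_clock);
        waiter
    })
}

fn main() {
    let mut condvar_clock: Clock = [0, 0];
    let mut waiters: VecDeque<Clock> = VecDeque::new();

    // Signal with no waiters: the clock is still released for later wakes.
    assert!(signal(&mut condvar_clock, &mut waiters, &[3, 0]).is_none());

    // A waiter arrives and the next signal wakes it; it sees both signals.
    waiters.push_back([0, 0]);
    let woken = signal(&mut condvar_clock, &mut waiters, &[5, 0]).unwrap();
    assert_eq!(woken, [5, 0]);
}
```
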
 
     #[inline]
@@ -342,4 +546,40 @@ fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
     }
+
+    fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32) {
+        let this = self.eval_context_mut();
+        let futex = &mut this.machine.threads.sync.futexes.entry(addr).or_default();
+        let waiters = &mut futex.waiters;
+        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
+        waiters.push_back(FutexWaiter { thread, bitset });
+    }
+
+    fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        let current_thread = this.get_active_thread();
+        let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
+        let data_race = &this.machine.data_race;
+
+        // Each futex-wake happens-before the end of the futex wait
+        if let Some(data_race) = data_race {
+            data_race.validate_lock_release(&mut futex.data_race, current_thread);
+        }
+
+        // Wake up the first thread in the queue that matches any of the bits in the bitset.
+        futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
+            let waiter = futex.waiters.remove(i).unwrap();
+            if let Some(data_race) = data_race {
+                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
+            }
+            waiter.thread
+        })
+    }
+
+    fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr) {
+            futex.waiters.retain(|waiter| waiter.thread != thread);
+        }
+    }
 }
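
For reference, the bitset matching used by `futex_wake` above can be modelled in isolation: wake the first queued waiter whose bitset intersects the wake mask, with `u32::MAX` acting as the wildcard used by plain `FUTEX_WAIT`/`FUTEX_WAKE`. A simplified sketch follows; the data-race clock handling shown earlier is omitted, and `MiniWaiter` plus the `usize` thread ids are illustrative only.

```rust
// Hypothetical sketch of the FUTEX_*_BITSET selection in `futex_wake`.
use std::collections::VecDeque;

struct MiniWaiter {
    thread: usize,
    bitset: u32,
}

fn wake(waiters: &mut VecDeque<MiniWaiter>, bitset: u32) -> Option<usize> {
    // Wake up the first thread in the queue that matches any of the bits.
    let i = waiters.iter().position(|w| w.bitset & bitset != 0)?;
    Some(waiters.remove(i).unwrap().thread)
}

fn main() {
    let mut waiters = VecDeque::from([
        MiniWaiter { thread: 1, bitset: 0b0001 },
        MiniWaiter { thread: 2, bitset: 0b0010 },
        MiniWaiter { thread: 3, bitset: u32::MAX }, // plain FUTEX_WAIT
    ]);

    // A bitset wake targeting bit 1 skips thread 1 and wakes thread 2.
    assert_eq!(wake(&mut waiters, 0b0010), Some(2));
    // A plain wake (all bits set) wakes the first remaining waiter.
    assert_eq!(wake(&mut waiters, u32::MAX), Some(1));
    assert_eq!(wake(&mut waiters, 0b0100), Some(3)); // u32::MAX matches anything
}
```
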