rename track-raw-pointers flag to tag-raw-pointers
[rust.git] / src / sync.rs
index 7e3c27b386dfe0ba694c07154f2b6f2aef010373..0e4e9695d81d529b7160ff044c1fa2afc7c6a095 100644 (file)
@@ -61,6 +61,12 @@ struct Mutex {
     lock_count: usize,
     /// The queue of threads waiting for this mutex.
     queue: VecDeque<ThreadId>,
+    /// Data race handle; this tracks the happens-before
+    /// relationship between each mutex access. It is
+    /// released during unlocking and acquired during
+    /// locking, and therefore stores the clock of the
+    /// last thread to release this mutex.
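+    /// A rough sketch of the intended protocol, assuming
+    /// hypothetical `clock_of`/`clock_of_mut` helpers (not
+    /// the real API):
+    /// ```ignore
+    /// // unlock: release, storing the unlocking thread's clock
+    /// mutex.data_race = clock_of(unlocking_thread);
+    /// // lock: acquire, joining the stored clock into the acquirer
+    /// clock_of_mut(locking_thread).join(&mutex.data_race);
+    /// ```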
+    data_race: VClock,
 }
 
 declare_id!(RwLockId);
@@ -77,6 +83,25 @@ struct RwLock {
     writer_queue: VecDeque<ThreadId>,
     /// The queue of reader threads waiting for this lock.
     reader_queue: VecDeque<ThreadId>,
+    /// Data race handle for writers; tracks the happens-before
+    /// ordering between each write access to the rwlock. It is
+    /// also updated after a sequence of concurrent readers finishes,
+    /// to track the happens-before ordering between that set of
+    /// readers and the next writer.
+    /// Contains the clock of the last thread to release a writer
+    /// lock, or the joined clock of the set of last threads to
+    /// release shared reader locks.
+    data_race: VClock,
+    /// Data race handle for readers; this is temporary storage
+    /// for the combined happens-before ordering between all
+    /// concurrent readers and the next writer. The value is
+    /// copied to the main `data_race` variable once all readers
+    /// are finished.
+    /// It has to be stored separately since reader lock acquires
+    /// must load the clock of the last write, and must not add
+    /// happens-before orderings between shared reader locks.
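+    /// A rough sketch of the two-clock flow, with the same
+    /// hypothetical helpers as in `Mutex`:
+    /// ```ignore
+    /// // writer unlock: release into both clocks
+    /// rwlock.data_race = clock_of(writer);
+    /// rwlock.data_race_reader = clock_of(writer);
+    /// // reader unlock: join only into the reader clock
+    /// rwlock.data_race_reader.join(&clock_of(reader));
+    /// // last reader unlocked: publish the joined clock to writers
+    /// rwlock.data_race.clone_from(&rwlock.data_race_reader);
+    /// ```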
+    data_race_reader: VClock,
 }
 
 declare_id!(CondvarId);
@@ -94,6 +119,31 @@ struct CondvarWaiter {
 #[derive(Default, Debug)]
 struct Condvar {
     waiters: VecDeque<CondvarWaiter>,
+    /// Tracks the happens-before relationship
+    /// between a condvar signal and a condvar
+    /// wait during a non-spurious signal event.
+    /// Contains the clock of the last thread to
+    /// perform a condvar signal.
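+    /// A rough sketch, again with the hypothetical helpers
+    /// from `Mutex`:
+    /// ```ignore
+    /// // signal: the signalling thread releases its clock
+    /// condvar.data_race = clock_of(signalling_thread);
+    /// // wake: the woken waiter acquires that clock
+    /// clock_of_mut(waiter_thread).join(&condvar.data_race);
+    /// ```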
+    data_race: VClock,
+}
+
+/// The futex state.
+#[derive(Default, Debug)]
+struct Futex {
+    waiters: VecDeque<FutexWaiter>,
+    /// Tracks the happens-before relationship
+    /// between a futex-wake and a futex-wait
+    /// during a non-spurious wake event.
+    /// Contains the clock of the last thread to
+    /// perform a futex-wake.
+    data_race: VClock,
+}
+
+/// A thread waiting on a futex.
+#[derive(Debug)]
+struct FutexWaiter {
+    /// The thread that is waiting on this futex.
+    thread: ThreadId,
 }
 
 /// The state of all synchronization variables.
@@ -102,6 +152,7 @@ pub(super) struct SynchronizationState {
     mutexes: IndexVec<MutexId, Mutex>,
     rwlocks: IndexVec<RwLockId, RwLock>,
     condvars: IndexVec<CondvarId, Condvar>,
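+    /// Futexes (waiter queues), keyed by the address of the futex word.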
+    futexes: HashMap<u64, Futex>,
 }
 
 // Private extension trait for local helper methods
@@ -191,17 +242,16 @@ fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
             mutex.owner = Some(thread);
         }
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
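+        // Acquire the mutex's clock: the last unlock happens-before this lock.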
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.validate_lock_acquire(&mutex.data_race, thread);
+        }
     }
 
     /// Try unlocking by decreasing the lock count and returning the old lock
     /// count. If the lock count reaches 0, release the lock and potentially
     /// give to a new owner. If the lock was not locked by `expected_owner`,
     /// return `None`.
-    fn mutex_unlock(
-        &mut self,
-        id: MutexId,
-        expected_owner: ThreadId,
-    ) -> Option<usize> {
+    fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
         let this = self.eval_context_mut();
         let mutex = &mut this.machine.threads.sync.mutexes[id];
         if let Some(current_owner) = mutex.owner {
@@ -218,6 +268,9 @@ fn mutex_unlock(
                 mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                 // to another thread.
+                if let Some(data_race) = &this.memory.extra.data_race {
+                    data_race.validate_lock_release(&mut mutex.data_race, current_owner);
+                }
                 this.mutex_dequeue_and_lock(id);
             }
             Some(old_lock_count)
@@ -250,9 +303,11 @@ fn rwlock_is_locked(&self, id: RwLockId) -> bool {
         let rwlock = &this.machine.threads.sync.rwlocks[id];
         trace!(
             "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
-            id, rwlock.writer, rwlock.readers.len(),
+            id,
+            rwlock.writer,
+            rwlock.readers.len(),
         );
-        rwlock.writer.is_some()|| rwlock.readers.is_empty().not()
+        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
     }
 
     #[inline]
@@ -270,15 +325,20 @@ fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
         let this = self.eval_context_mut();
         assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
         trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
-        let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0);
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        let count = rwlock.readers.entry(reader).or_insert(0);
         *count = count.checked_add(1).expect("the reader counter overflowed");
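+        // Acquire the write clock: the last writer unlock happens-before this read lock.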
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.validate_lock_acquire(&rwlock.data_race, reader);
+        }
     }
 
     /// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
     /// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
     fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
         let this = self.eval_context_mut();
-        match this.machine.threads.sync.rwlocks[id].readers.entry(reader) {
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        match rwlock.readers.entry(reader) {
             Entry::Occupied(mut entry) => {
                 let count = entry.get_mut();
                 assert!(*count > 0, "rwlock locked with count == 0");
@@ -292,8 +352,17 @@ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
             }
             Entry::Vacant(_) => return false, // we did not even own this lock
         }
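+        // Join this reader's clock into the shared reader clock, without
+        // acquiring the clocks of other readers (a shared release).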
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
+        }
+
         // The thread was a reader. If the lock is not held any more, give it to a writer.
         if this.rwlock_is_locked(id).not() {
+            // All the readers are finished, so set the writer data-race handle
+            // to the union of all reader data-race handles, since the set of
+            // readers happens-before the next writer.
+            let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+            rwlock.data_race.clone_from(&rwlock.data_race_reader);
             this.rwlock_dequeue_and_lock_writer(id);
         }
         true
@@ -301,11 +370,7 @@ fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
 
     #[inline]
     /// Put the reader in the queue waiting for the lock and block it.
-    fn rwlock_enqueue_and_block_reader(
-        &mut self,
-        id: RwLockId,
-        reader: ThreadId,
-    ) {
+    fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
         this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
@@ -318,7 +383,11 @@ fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
         assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
         trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
-        this.machine.threads.sync.rwlocks[id].writer = Some(writer);
+        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
+        rwlock.writer = Some(writer);
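+        // Acquire the clock of the last writer, or of the joined set of last
+        // readers, which happens-before this write lock.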
+        if let Some(data_race) = &this.memory.extra.data_race {
+            data_race.validate_lock_acquire(&rwlock.data_race, writer);
+        }
     }
 
     #[inline]
@@ -333,6 +402,13 @@ fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
             }
             rwlock.writer = None;
             trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
+            // Release to both the reader and writer vector clocks, since this
+            // writer happens-before both the next writer and the next set of
+            // readers.
+            if let Some(data_race) = &this.memory.extra.data_race {
+                data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
+                data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
+            }
             // The thread was a writer.
             //
             // We are prioritizing writers here against the readers. As a
@@ -354,11 +430,7 @@ fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
 
     #[inline]
     /// Put the writer in the queue waiting for the lock.
-    fn rwlock_enqueue_and_block_writer(
-        &mut self,
-        id: RwLockId,
-        writer: ThreadId,
-    ) {
+    fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
         this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
@@ -391,10 +463,20 @@ fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, mutex: MutexId) {
     /// variable.
     fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
         let this = self.eval_context_mut();
-        this.machine.threads.sync.condvars[id]
-            .waiters
-            .pop_front()
-            .map(|waiter| (waiter.thread, waiter.mutex))
+        let current_thread = this.get_active_thread();
+        let condvar = &mut this.machine.threads.sync.condvars[id];
+        let data_race = &this.memory.extra.data_race;
+
+        // Each condvar signal happens-before the end of the condvar wait
+        if let Some(data_race) = data_race {
+            data_race.validate_lock_release(&mut condvar.data_race, current_thread);
+        }
+        condvar.waiters.pop_front().map(|waiter| {
+            if let Some(data_race) = data_race {
+                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
+            }
+            (waiter.thread, waiter.mutex)
+        })
     }
 
     #[inline]
@@ -403,4 +485,38 @@ fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
         let this = self.eval_context_mut();
         this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
     }
+
+    fn futex_wait(&mut self, addr: u64, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        let futex = this.machine.threads.sync.futexes.entry(addr).or_default();
+        let waiters = &mut futex.waiters;
+        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
+        waiters.push_back(FutexWaiter { thread });
+    }
+
+    fn futex_wake(&mut self, addr: u64) -> Option<ThreadId> {
+        let this = self.eval_context_mut();
+        let current_thread = this.get_active_thread();
+        let futex = this.machine.threads.sync.futexes.get_mut(&addr)?;
+        let data_race = &this.memory.extra.data_race;
+
+        // Each futex-wake happens-before the end of the futex wait
+        if let Some(data_race) = data_race {
+            data_race.validate_lock_release(&mut futex.data_race, current_thread);
+        }
+        futex.waiters.pop_front().map(|waiter| {
+            if let Some(data_race) = data_race {
+                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
+            }
+            waiter.thread
+        })
+    }
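+
+    // A sketch of how a futex shim might drive `futex_wait` and
+    // `futex_wake` (hypothetical call sites; `unblock_thread` is an
+    // assumed name for the thread-unblocking API):
+    //
+    //     // FUTEX_WAIT: register the calling thread, then block it
+    //     this.futex_wait(addr, this.get_active_thread());
+    //     // FUTEX_WAKE: wake at most one waiter, if any
+    //     if let Some(woken) = this.futex_wake(addr) {
+    //         this.unblock_thread(woken);
+    //     }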
+
+    fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId) {
+        let this = self.eval_context_mut();
+        if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr) {
+            futex.waiters.retain(|waiter| waiter.thread != thread);
+        }
+    }
 }