use std::collections::{hash_map::Entry, HashMap, VecDeque};
-use std::convert::TryFrom;
use std::num::NonZeroU32;
use std::ops::Not;
}
impl $name {
- pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Tag> {
+ pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Provenance> {
Scalar::from_u32(self.0.get())
}
}
struct FutexWaiter {
/// The thread that is waiting on this futex.
thread: ThreadId,
+ /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
+ bitset: u32,
}
/// The state of all synchronization variables.
mutexes: IndexVec<MutexId, Mutex>,
rwlocks: IndexVec<RwLockId, RwLock>,
condvars: IndexVec<CondvarId, Condvar>,
- futexes: HashMap<Pointer, Futex>,
+ futexes: HashMap<u64, Futex>,
}
// Private extension trait for local helper methods
this.machine.threads.sync.mutexes.push(Default::default())
}
+ #[inline]
+ /// Provides the closure with the next MutexId. Creates that mutex if the closure returns None,
+ /// otherwise returns the value from the closure
+ fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
+ where
+ F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
+ {
+ let this = self.eval_context_mut();
+ let next_index = this.machine.threads.sync.mutexes.next_index();
+ if let Some(old) = existing(this, next_index)? {
+ Ok(old)
+ } else {
+ let new_index = this.machine.threads.sync.mutexes.push(Default::default());
+ assert_eq!(next_index, new_index);
+ Ok(new_index)
+ }
+ }
+
#[inline]
/// Get the id of the thread that currently owns this lock.
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
mutex.owner = Some(thread);
}
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&mutex.data_race, thread);
}
}
mutex.owner = None;
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
}
this.mutex_dequeue_and_lock(id);
this.machine.threads.sync.rwlocks.push(Default::default())
}
+ #[inline]
+ /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
+ /// otherwise returns the value from the closure
+ fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
+ where
+ F: FnOnce(
+ &mut MiriEvalContext<'mir, 'tcx>,
+ RwLockId,
+ ) -> InterpResult<'tcx, Option<RwLockId>>,
+ {
+ let this = self.eval_context_mut();
+ let next_index = this.machine.threads.sync.rwlocks.next_index();
+ if let Some(old) = existing(this, next_index)? {
+ Ok(old)
+ } else {
+ let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
+ assert_eq!(next_index, new_index);
+ Ok(new_index)
+ }
+ }
+
#[inline]
/// Check if locked.
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
let count = rwlock.readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, reader);
}
}
}
Entry::Vacant(_) => return false, // we did not even own this lock
}
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
}
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.writer = Some(writer);
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_acquire(&rwlock.data_race, writer);
}
}
// Release memory to both reader and writer vector clocks
// since this writer happens-before both the union of readers once they are finished
// and the next writer
- if let Some(data_race) = &this.memory.extra.data_race {
+ if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
}
this.machine.threads.sync.condvars.push(Default::default())
}
+ #[inline]
+ /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
+ /// otherwise returns the value from the closure
+ fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
+ where
+ F: FnOnce(
+ &mut MiriEvalContext<'mir, 'tcx>,
+ CondvarId,
+ ) -> InterpResult<'tcx, Option<CondvarId>>,
+ {
+ let this = self.eval_context_mut();
+ let next_index = this.machine.threads.sync.condvars.next_index();
+ if let Some(old) = existing(this, next_index)? {
+ Ok(old)
+ } else {
+ let new_index = this.machine.threads.sync.condvars.push(Default::default());
+ assert_eq!(next_index, new_index);
+ Ok(new_index)
+ }
+ }
+
#[inline]
/// Is the conditional variable awaited?
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let condvar = &mut this.machine.threads.sync.condvars[id];
- let data_race = &this.memory.extra.data_race;
+ let data_race = &this.machine.data_race;
// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
}
condvar.waiters.pop_front().map(|waiter| {
if let Some(data_race) = data_race {
- data_race.validate_lock_acquire(&mut condvar.data_race, waiter.thread);
+ data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
}
(waiter.thread, waiter.mutex)
})
this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
}
- fn futex_wait(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {
+ fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32) {
let this = self.eval_context_mut();
- let futex = &mut this.machine.threads.sync.futexes.entry(addr.erase_tag()).or_default();
+ let futex = &mut this.machine.threads.sync.futexes.entry(addr).or_default();
let waiters = &mut futex.waiters;
assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
- waiters.push_back(FutexWaiter { thread });
+ waiters.push_back(FutexWaiter { thread, bitset });
}
- fn futex_wake(&mut self, addr: Pointer<stacked_borrows::Tag>) -> Option<ThreadId> {
+ fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
- let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr.erase_tag())?;
- let data_race = &this.memory.extra.data_race;
+ let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
+ let data_race = &this.machine.data_race;
// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut futex.data_race, current_thread);
}
- let res = futex.waiters.pop_front().map(|waiter| {
+
+ // Wake up the first thread in the queue that matches any of the bits in the bitset.
+ futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
+ let waiter = futex.waiters.remove(i).unwrap();
if let Some(data_race) = data_race {
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
}
waiter.thread
- });
- res
+ })
}
- fn futex_remove_waiter(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {
+ fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId) {
let this = self.eval_context_mut();
- if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr.erase_tag()) {
+ if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr) {
futex.waiters.retain(|waiter| waiter.thread != thread);
}
}