/// Load the internal atomic memory cells if they exist.
#[inline]
fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
- match &self.atomic_ops {
- Some(op) => Some(&*op),
- None => None,
- }
+ self.atomic_ops.as_deref()
}
/// Load or create the internal atomic memory metadata
Ok(())
}
+ /// Checks if the memory cell access is ordered with all prior atomic reads and writes.
+ fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
+ if let Some(atomic) = self.atomic() {
+ atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
+ } else {
+ true
+ }
+ }
+
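
Aside: the `<=` in the new check is the pointwise partial order on vector
clocks. A minimal standalone sketch of that comparison (the plain `u64`
slices and the `vclock_le` name are illustrative assumptions, not Miri's
actual `VClock` API):

    // `lhs <= rhs` iff every component of `lhs` is covered by the matching
    // component of `rhs`; missing entries are treated as zero.
    fn vclock_le(lhs: &[u64], rhs: &[u64]) -> bool {
        let n = lhs.len().max(rhs.len());
        (0..n).all(|i| lhs.get(i).copied().unwrap_or(0) <= rhs.get(i).copied().unwrap_or(0))
    }
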
/// Update memory cell data-race tracking for atomic
/// load relaxed semantics; this is a no-op if this memory
/// was not previously used as atomic memory.
#[inline]
fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
let this = self.eval_context_ref();
- let old = if let Some(data_race) = &this.machine.data_race {
- data_race.multi_threaded.replace(false)
- } else {
- false
- };
+ if let Some(data_race) = &this.machine.data_race {
+ data_race.ongoing_action_data_race_free.set(true);
+ }
let result = op(this);
if let Some(data_race) = &this.machine.data_race {
- data_race.multi_threaded.set(old);
+ data_race.ongoing_action_data_race_free.set(false);
}
result
}
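
The flag is set and then cleared manually around `op`; that is sound here
because `op` returns plainly, with no `?` between the two `set` calls. A
hedged sketch of an RAII alternative that would also survive early returns
(`SuppressRaces` is hypothetical, not a type in this change):

    use std::cell::Cell;

    // Clears the suppression flag on drop, so race detection cannot be left
    // disabled if the enclosed action returns early or unwinds.
    struct SuppressRaces<'a>(&'a Cell<bool>);

    impl<'a> SuppressRaces<'a> {
        fn new(flag: &'a Cell<bool>) -> Self {
            flag.set(true);
            SuppressRaces(flag)
        }
    }

    impl Drop for SuppressRaces<'_> {
        fn drop(&mut self) {
            self.0.set(false);
        }
    }
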
op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
) -> R {
let this = self.eval_context_mut();
- let old = if let Some(data_race) = &this.machine.data_race {
- data_race.multi_threaded.replace(false)
- } else {
- false
- };
+ if let Some(data_race) = &this.machine.data_race {
+ data_race.ongoing_action_data_race_free.set(true);
+ }
let result = op(this);
if let Some(data_race) = &this.machine.data_race {
- data_race.multi_threaded.set(old);
+ data_race.ongoing_action_data_race_free.set(false);
}
result
}
// the *value* (including the associated provenance if this is an AtomicPtr) at this location.
// Only metadata on the location itself is used.
let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
+ this.validate_overlapping_atomic(place)?;
this.buffered_atomic_read(place, atomic, scalar, || {
this.validate_atomic_load(place, atomic)
})
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(dest)?;
this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
this.validate_atomic_store(dest, atomic)?;
- this.buffered_atomic_write(val, dest, atomic)
+ // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
+ // side effects from a read the program did not perform, so we have to initialise
+ // the store buffer with the value currently being written.
+ // ONCE this is fixed, please remove the hack in buffered_atomic_write() in weak_memory.rs.
+ // https://github.com/rust-lang/miri/issues/2164
+ this.buffered_atomic_write(val, dest, atomic, val)
}
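
A simplified model of what the FIXME describes: the store buffer for a
location that has not been used atomically before is created pre-seeded with
an initial element, so later loads always have at least one store to read
from (`StoreBuffer` here is a stand-in sketch, not the weak_memory.rs type):

    // Per-location buffer of stores, newest last; created with one element.
    struct StoreBuffer<T> {
        stores: Vec<T>,
    }

    impl<T> StoreBuffer<T> {
        fn new_with(init: T) -> Self {
            StoreBuffer { stores: vec![init] }
        }
        fn push_store(&mut self, val: T) {
            self.stores.push(val);
        }
    }
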
/// Perform an atomic operation on a memory location.
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
// Atomics wrap around on overflow.
this.validate_atomic_rmw(place, atomic)?;
- this.buffered_atomic_rmw(val.to_scalar_or_uninit(), place, atomic)?;
+ this.buffered_atomic_rmw(
+ val.to_scalar_or_uninit(),
+ place,
+ atomic,
+ old.to_scalar_or_uninit(),
+ )?;
Ok(old)
}
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
this.validate_atomic_rmw(place, atomic)?;
- this.buffered_atomic_rmw(new, place, atomic)?;
+ this.buffered_atomic_rmw(new, place, atomic, old)?;
Ok(old)
}
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;
this.validate_atomic_rmw(place, atomic)?;
- this.buffered_atomic_rmw(new_val.to_scalar_or_uninit(), place, atomic)?;
+ this.buffered_atomic_rmw(
+ new_val.to_scalar_or_uninit(),
+ place,
+ atomic,
+ old.to_scalar_or_uninit(),
+ )?;
// Return the old value.
Ok(old)
use rand::Rng as _;
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
// Failure ordering cannot be stronger than success ordering, so first attempt
// the read with the failure ordering; if the comparison succeeds, read again
// with the success ordering and perform the write.
if cmpxchg_success {
this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
this.validate_atomic_rmw(place, success)?;
- this.buffered_atomic_rmw(new, place, success)?;
+ this.buffered_atomic_rmw(new, place, success, old.to_scalar_or_uninit())?;
} else {
this.validate_atomic_load(place, fail)?;
// A failed compare exchange is equivalent to a load, reading from the latest store
// in the modification order.
// Since `old` is only a value and not the store element, we need to separately
// find it in our store buffer and perform load_impl on it.
- this.perform_read_on_buffered_latest(place, fail)?;
+ this.perform_read_on_buffered_latest(place, fail, old.to_scalar_or_uninit())?;
}
// Return the old value.
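
For reference, the ordering rule being modeled is the one user code states on
`std::sync::atomic` (at the time of this change, the failure ordering could
not be stronger than the success ordering). An illustrative example:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        let x = AtomicUsize::new(0);
        // Success uses AcqRel, failure uses Acquire (weaker, as required).
        match x.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire) {
            Ok(old) => assert_eq!(old, 0), // the exchange succeeded and stored 1
            Err(cur) => println!("compare failed, value = {cur}"), // acted as a load
        }
    }
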
atomic: AtomicReadOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
+ this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(
place,
atomic,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(
place,
atomic,
let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
let release = matches!(atomic, Release | AcqRel | SeqCst);
let this = self.eval_context_mut();
+ this.validate_overlapping_atomic(place)?;
this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
if acquire {
memory.load_acquire(clocks, index)?;
)
}
+ /// Detect racing atomic reads and writes (not data races)
+ /// on every byte of the current access range.
+ pub(super) fn race_free_with_atomic(&self, range: AllocRange, global: &GlobalState) -> bool {
+ if global.race_detecting() {
+ let (_, clocks) = global.current_thread_state();
+ let alloc_ranges = self.alloc_ranges.borrow();
+ for (_, range) in alloc_ranges.iter(range.start, range.size) {
+ if !range.race_free_with_atomic(&clocks) {
+ return false;
+ }
+ }
+ }
+ true
+ }
+
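
The kind of mixed-atomicity race this per-byte check is aimed at, as an
illustrative, deliberately racy program the detector should flag (the pointer
cast is the bug being demonstrated, not recommended code):

    use std::sync::atomic::{AtomicU32, Ordering};

    static X: AtomicU32 = AtomicU32::new(0);

    fn main() {
        let t = std::thread::spawn(|| {
            X.store(1, Ordering::SeqCst); // atomic write to these bytes...
        });
        // ...racing with an unsynchronized non-atomic read of the same bytes.
        let _val = unsafe { *(&X as *const AtomicU32 as *const u32) };
        t.join().unwrap();
    }
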
/// Detect data-races for an unsynchronized read operation; this will not perform
- /// data-race detection if `multi-threaded` is false, either due to no threads
+ /// data-race detection if `race_detecting()` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation for which data-race detection is handled separately, for example
/// atomic read operations.
range: AllocRange,
global: &GlobalState,
) -> InterpResult<'tcx> {
- if global.multi_threaded.get() {
+ if global.race_detecting() {
let (index, clocks) = global.current_thread_state();
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
write_type: WriteType,
global: &mut GlobalState,
) -> InterpResult<'tcx> {
- if global.multi_threaded.get() {
+ if global.race_detecting() {
let (index, clocks) = global.current_thread_state();
for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
}
/// Detect data-races for an unsynchronized write operation; this will not perform
- /// data-race threads if `multi-threaded` is false, either due to no threads
+ /// data-race detection if `race_detecting()` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation
pub fn write<'tcx>(
}
/// Detect data-races for an unsynchronized deallocate operation; this will not perform
- /// data-race threads if `multi-threaded` is false, either due to no threads
+ /// data-race detection if `race_detecting()` is false, either due to no threads
/// being created or if it is temporarily disabled during a racy read or write
/// operation
pub fn deallocate<'tcx>(
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
if let Some(data_race) = &this.machine.data_race {
- if data_race.multi_threaded.get() {
+ if data_race.race_detecting() {
let size = place.layout.size;
let (alloc_id, base_offset, _tag) = this.ptr_get_alloc_id(place.ptr)?;
// Load and log the atomic operation.
// Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
- let alloc_meta = &this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+ let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
log::trace!(
"Atomic op({}) with ordering {:?} on {:?} (size={})",
description,
/// any data-races.
multi_threaded: Cell<bool>,
+ /// A flag to mark that we are currently performing
+ /// a data-race-free action (such as an atomic access)
+ /// in order to suppress the race detector.
+ ongoing_action_data_race_free: Cell<bool>,
+
/// Mapping of a vector index to a known set of thread
/// clocks, this is not directly mapping from a thread id
/// since it may refer to multiple threads.
pub fn new() -> Self {
let mut global_state = GlobalState {
multi_threaded: Cell::new(false),
+ ongoing_action_data_race_free: Cell::new(false),
vector_clocks: RefCell::new(IndexVec::new()),
vector_info: RefCell::new(IndexVec::new()),
thread_info: RefCell::new(IndexVec::new()),
global_state
}
+ // We perform data-race detection when there is more than one active thread
+ // and we have not temporarily disabled race detection in order to perform
+ // something data-race free.
+ fn race_detecting(&self) -> bool {
+ self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
+ }
+
+ pub fn ongoing_action_data_race_free(&self) -> bool {
+ self.ongoing_action_data_race_free.get()
+ }
+
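
A standalone model of how the two flags combine (names mirror the diff;
illustrative only, compiled separately from Miri):

    use std::cell::Cell;

    struct Flags {
        multi_threaded: Cell<bool>,
        ongoing_action_data_race_free: Cell<bool>,
    }

    impl Flags {
        fn race_detecting(&self) -> bool {
            self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
        }
    }

    fn main() {
        let f = Flags {
            multi_threaded: Cell::new(true),
            ongoing_action_data_race_free: Cell::new(false),
        };
        assert!(f.race_detecting()); // multiple threads, nothing suppressed
        f.ongoing_action_data_race_free.set(true);
        assert!(!f.race_detecting()); // suppressed during a data-race-free action
    }
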
// Try to find vector index values that can potentially be re-used
// by a new thread instead of a new vector index being created.
fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
let thread_name = &self.thread_info.borrow()[thread].thread_name;
if let Some(name) = thread_name {
let name: &str = name;
- format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
+ format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), name)
} else {
format!("Thread(id = {:?})", thread.to_u32())
}