//! Relaxed stores now unconditionally block all currently active release sequences,
//! so per-thread tracking of release sequences is not needed.
//!
+//! The implementation also models races with memory allocation and deallocation by internally
+//! treating allocation and deallocation as a kind of write for the purposes of data-race detection.
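+//!
+//! For example, a deallocation that races with an unsynchronized access in another
+//! thread is caught by the same machinery as an ordinary racing write. A minimal
+//! sketch of such a program, using only standard library APIs:
+//!
+//! ```rust,ignore
+//! let b = Box::new(0u32);
+//! // Smuggle the address to the other thread as a `usize` so the closure is `Send`.
+//! let addr = &*b as *const u32 as usize;
+//! let t = std::thread::spawn(move || unsafe { (addr as *const u32).read() });
+//! drop(b); // the deallocation acts as a write, racing with the unsynchronized read
+//! t.join().unwrap();
+//! ```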
+//!
//! This does not explore weak memory orders, and so can still miss data-races,
//! but should not report false positives.
//!
use rustc_target::abi::Size;
use crate::{
- ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
- OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
- VectorIdx,
+ ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind, MiriEvalContext,
+ MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag,
+ ThreadId, VClock, VTimestamp, VectorIdx,
};
pub type AllocExtra = VClockAlloc;
sync_vector: VClock,
}
+/// Type of a write operation: allocating memory, non-atomic writes, and
+/// deallocating memory are all treated as writes for the purpose of the
+/// data-race detector.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteType {
/// Allocate memory.
Allocate,
+
/// Standard unsynchronized write.
Write,
- /// Deallocate memory
+
+ /// Deallocate memory.
+ /// Note that when memory is deallocated first, later non-atomic accesses
+ /// will be reported as use-after-free errors, not as data races.
+ /// (A similar caveat applies to `Allocate` above.)
Deallocate,
}
impl WriteType {
fn get_descriptor(self) -> &'static str {
match self {
- WriteType::Allocate => "ALLOCATE",
- WriteType::Write => "WRITE",
- WriteType::Deallocate => "DEALLOCATE",
+ WriteType::Allocate => "Allocate",
+ WriteType::Write => "Write",
+ WriteType::Deallocate => "Deallocate",
}
}
}
}
impl MemoryCellClocks {
-
/// Create a new set of clocks representing memory allocated
/// at a given vector timestamp and index.
fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
atomic_ops: None,
}
}
-
+
/// Load the internal atomic memory cells if they exist.
#[inline]
fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
/// store relaxed semantics.
fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
-
+
// The handling of release sequences was changed in C++20 and so
// the code here is different to the paper since now all relaxed
// stores block release sequences. The exception for same-thread
/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
&self,
- op: OpTy<'tcx, Tag>,
+ op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
- this.read_scalar_atomic(value_place, atomic)
+ this.read_scalar_atomic(&value_place, atomic)
}
/// Atomic variant of write_scalar_at_offset.
fn write_scalar_at_offset_atomic(
&mut self,
- op: OpTy<'tcx, Tag>,
+ op: &OpTy<'tcx, Tag>,
offset: u64,
value: impl Into<ScalarMaybeUninit<Tag>>,
layout: TyAndLayout<'tcx>,
// Ensure that the following write at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
- this.write_scalar_atomic(value.into(), value_place, atomic)
+ this.write_scalar_atomic(value.into(), &value_place, atomic)
}
/// Perform an atomic read operation at the memory location.
fn read_scalar_atomic(
&self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
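+ // Perform the raw read with data-race checks suppressed; the access is
+ // validated against the atomic ordering separately below.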
- let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place.into()))?;
+ let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
self.validate_atomic_load(place, atomic)?;
Ok(scalar)
}
fn write_scalar_atomic(
&mut self,
val: ScalarMaybeUninit<Tag>,
- dest: MPlaceTy<'tcx, Tag>,
+ dest: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
- this.allow_data_races_mut(move |this| this.write_scalar(val, dest.into()))?;
+ this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
self.validate_atomic_store(dest, atomic)
}
/// Perform an atomic operation on a memory location.
fn atomic_op_immediate(
&mut self,
- place: MPlaceTy<'tcx, Tag>,
- rhs: ImmTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
+ rhs: &ImmTy<'tcx, Tag>,
op: mir::BinOp,
neg: bool,
atomic: AtomicRwOp,
) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
let this = self.eval_context_mut();
- let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
+ let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
// Atomics wrap around on overflow.
- let val = this.binary_op(op, old, rhs)?;
- let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
- this.allow_data_races_mut(|this| this.write_immediate(*val, place.into()))?;
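+ // `neg` requests a bitwise negation of the result; combined with
+ // `BinOp::BitAnd` this is how a `fetch_nand` can be expressed.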
+ let val = this.binary_op(op, &old, rhs)?;
+ let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
+ this.allow_data_races_mut(|this| this.write_immediate(*val, &(*place).into()))?;
this.validate_atomic_rmw(place, atomic)?;
Ok(old)
/// scalar value, the old value is returned.
fn atomic_exchange_scalar(
&mut self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
atomic: AtomicRwOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_mut();
- let old = this.allow_data_races_mut(|this| this.read_scalar(place.into()))?;
- this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
+ let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
+ this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
this.validate_atomic_rmw(place, atomic)?;
Ok(old)
}
+ /// Perform an atomic min/max operation on a memory place with a new
+ /// scalar value; the old value is returned.
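+ /// A hypothetical shim for an `atomic_min` intrinsic could call this as
+ /// `this.atomic_min_max_scalar(&place, rhs, /* min */ true, AtomicRwOp::SeqCst)`
+ /// (the names `place` and `rhs` and the choice of ordering are assumed here).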
+ fn atomic_min_max_scalar(
+ &mut self,
+ place: &MPlaceTy<'tcx, Tag>,
+ rhs: ImmTy<'tcx, Tag>,
+ min: bool,
+ atomic: AtomicRwOp,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
+ let this = self.eval_context_mut();
+
+ let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
+ let lt = this.overflowing_binary_op(mir::BinOp::Lt, &old, &rhs)?.0.to_bool()?;
+
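+ // `lt` is `old < rhs`: a min operation keeps the smaller of the two
+ // values, a max operation the larger.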
+ let new_val = if min {
+ if lt { &old } else { &rhs }
+ } else {
+ if lt { &rhs } else { &old }
+ };
+
+ this.allow_data_races_mut(|this| this.write_immediate_to_mplace(**new_val, place))?;
+
+ this.validate_atomic_rmw(&place, atomic)?;
+
+ // Return the old value.
+ Ok(old)
+ }
+
/// Perform an atomic compare and exchange at a given memory location.
/// On success an atomic RMW operation is performed and on failure
- /// only an atomic read occurs.
+ /// only an atomic read occurs. If `can_fail_spuriously` is true,
+ /// then we treat it as a "compare_exchange_weak" operation, and
+ /// some portion of the time fail even when the values are actually
+ /// identical.
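+ ///
+ /// A typical user-level retry loop tolerates such spurious failures by
+ /// simply retrying; a sketch using only standard library APIs:
+ ///
+ /// ```rust,ignore
+ /// use std::sync::atomic::{AtomicUsize, Ordering};
+ /// let a = AtomicUsize::new(0);
+ /// let mut old = a.load(Ordering::Relaxed);
+ /// loop {
+ ///     match a.compare_exchange_weak(old, old + 1, Ordering::AcqRel, Ordering::Relaxed) {
+ ///         Ok(_) => break,
+ ///         // On a (possibly spurious) failure, retry with the value we saw.
+ ///         Err(actual) => old = actual,
+ ///     }
+ /// }
+ /// ```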
fn atomic_compare_exchange_scalar(
&mut self,
- place: MPlaceTy<'tcx, Tag>,
- expect_old: ImmTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
+ expect_old: &ImmTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
+ can_fail_spuriously: bool,
) -> InterpResult<'tcx, Immediate<Tag>> {
+ use rand::Rng as _;
let this = self.eval_context_mut();
// Failure ordering cannot be stronger than success ordering, therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
// Read as immediate for the sake of `binary_op()`
- let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
-
+ let old = this.allow_data_races_mut(|this| this.read_immediate(&(place.into())))?;
// `binary_op` will bail if either of them is not a scalar.
- let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
- let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
+ let eq = this.overflowing_binary_op(mir::BinOp::Eq, &old, expect_old)?.0;
+ // If the operation would succeed, but is "weak", fail with
+ // probability `rate` even though the values compared equal.
+ let rate = this.memory.extra.cmpxchg_weak_failure_rate;
+ let cmpxchg_success = eq.to_bool()?
+ && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() >= rate);
+ let res = Immediate::ScalarPair(
+ old.to_scalar_or_uninit(),
+ Scalar::from_bool(cmpxchg_success).into(),
+ );
// Update ptr depending on comparison.
// if successful, perform a full rw-atomic validation
// otherwise treat this as an atomic load with the fail ordering.
- if eq.to_bool()? {
- this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
+ if cmpxchg_success {
+ this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
this.validate_atomic_rmw(place, success)?;
} else {
this.validate_atomic_load(place, fail)?;
/// associated memory-place and on the current thread.
fn validate_atomic_load(
&self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
/// associated memory-place and on the current thread.
fn validate_atomic_store(
&mut self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
/// at the associated memory place and on the current thread.
fn validate_atomic_rmw(
&mut self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
atomic: AtomicRwOp,
) -> InterpResult<'tcx> {
use AtomicRwOp::*;
// Either Release | AcqRel | SeqCst
clocks.apply_release_fence();
}
-
+
// Increment timestamp in case of release semantics.
Ok(atomic != AtomicFenceOp::Acquire)
})
Ok(())
}
}
+
+ fn reset_vector_clocks(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
+ let this = self.eval_context_mut();
+ if let Some(data_race) = &mut this.memory.extra.data_race {
+ if data_race.multi_threaded.get() {
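+ // Only bother when multi-threaded mode is active; in single-threaded
+ // execution no race-relevant clocks have been recorded.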
+ let alloc_meta =
+ this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+ alloc_meta.reset_clocks(ptr.offset, size);
+ }
+ }
+ Ok(())
+ }
}
/// Vector clock metadata for a logical memory allocation.
}
impl VClockAlloc {
-
/// Create a new data-race detector for newly allocated memory.
- pub fn new_allocation(global: &MemoryExtra, len: Size, track_alloc: bool) -> VClockAlloc {
- let (alloc_timestamp, alloc_index) = if track_alloc {
- let (alloc_index, clocks) = global.current_thread_state();
- let alloc_timestamp = clocks.clock[alloc_index];
- (alloc_timestamp, alloc_index)
- }else{
- (0, VectorIdx::MAX_INDEX)
+ pub fn new_allocation(
+ global: &MemoryExtra,
+ len: Size,
+ kind: MemoryKind<MiriMemoryKind>,
+ ) -> VClockAlloc {
+ let (alloc_timestamp, alloc_index) = match kind {
+ // User allocated and stack memory should track allocation.
+ MemoryKind::Machine(
+ MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap,
+ )
+ | MemoryKind::Stack => {
+ let (alloc_index, clocks) = global.current_thread_state();
+ let alloc_timestamp = clocks.clock[alloc_index];
+ (alloc_timestamp, alloc_index)
+ }
+ // Other global memory should trace races but be allocated at the 0 timestamp.
+ MemoryKind::Machine(
+ MiriMemoryKind::Global
+ | MiriMemoryKind::Machine
+ | MiriMemoryKind::Env
+ | MiriMemoryKind::ExternStatic
+ | MiriMemoryKind::Tls,
+ )
+ | MemoryKind::CallerLocation
+ | MemoryKind::Vtable => (0, VectorIdx::MAX_INDEX),
};
VClockAlloc {
global: Rc::clone(global),
alloc_ranges: RefCell::new(RangeMap::new(
- len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+ len,
+ MemoryCellClocks::new(alloc_timestamp, alloc_index),
)),
}
}
+ fn reset_clocks(&mut self, offset: Size, len: Size) {
+ let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+ for (_, range) in alloc_ranges.iter_mut(offset, len) {
+ // Reset the clocks for this portion of the range.
+ *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
+ }
+ }
+
// Find an index, if one exists where the value
// in `l` is greater than the value in `r`.
fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
+ log::trace!("Find index where not {:?} <= {:?}", l, r);
let l_slice = l.as_slice();
let r_slice = r.as_slice();
l_slice
.enumerate()
.find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
.expect("Invalid VClock Invariant");
- Some(idx)
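+ // `idx` is relative to the tail of `l` beyond `r`'s length, so
+ // translate it back into an index into the full vector.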
+ Some(idx + r_slice.len())
} else {
None
}
write_clock = VClock::new_with_index(range.write_index, range.write);
(range.write_type.get_descriptor(), range.write_index, &write_clock)
} else if let Some(idx) = Self::find_gt_index(&range.read, ¤t_clocks.clock) {
- ("READ", idx, &range.read)
+ ("Read", idx, &range.read)
} else if !is_atomic {
if let Some(atomic) = range.atomic() {
if let Some(idx) = Self::find_gt_index(&atomic.write_vector, ¤t_clocks.clock)
{
- ("ATOMIC_STORE", idx, &atomic.write_vector)
+ ("Atomic Store", idx, &atomic.write_vector)
} else if let Some(idx) =
Self::find_gt_index(&atomic.read_vector, ¤t_clocks.clock)
{
- ("ATOMIC_LOAD", idx, &atomic.read_vector)
+ ("Atomic Load", idx, &atomic.read_vector)
} else {
unreachable!(
"Failed to report data-race for non-atomic operation: no race found"
// Throw the data-race detection.
throw_ub_format!(
"Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
- \n\t\t -current vector clock = {:?}\
- \n\t\t -conflicting timestamp = {:?}",
+ \n(current vector clock = {:?}, conflicting timestamp = {:?})",
action,
current_thread_info,
other_action,
return Self::report_data_race(
&self.global,
range,
- "READ",
+ "Read",
false,
pointer,
len,
/// atomic-stores/atomic-rmw?
fn validate_atomic_op<A: Debug + Copy>(
&self,
- place: MPlaceTy<'tcx, Tag>,
+ place: &MPlaceTy<'tcx, Tag>,
atomic: A,
description: &str,
mut op: impl FnMut(
true,
place_ptr,
size,
- ).map(|_| true);
+ )
+ .map(|_| true);
}
}
vector_info.push(thread)
};
+ log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
+
// Mark the chosen vector index as in use by the thread.
thread_info[thread].vector_index = Some(created_index);
.as_ref()
.expect("Joined with thread but thread has not terminated");
-
// The join thread happens-before the current thread
// so update the current vector clock.
// Is not a release operation so the clock is not incremented.