use rustc_target::abi::Size;
use crate::{
- ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
- OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
- VectorIdx, MemoryKind, MiriMemoryKind
+ ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind, MiriEvalContext,
+ MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag,
+ ThreadId, VClock, VTimestamp, VectorIdx,
};
pub type AllocExtra = VClockAlloc;
atomic_ops: None,
}
}
-
+
/// Load the internal atomic memory cells if they exist.
#[inline]
fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
/// store relaxed semantics.
fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
self.atomic_write_detect(clocks, index)?;
-
+
// The handling of release sequences was changed in C++20 and so
// the code here is different to the paper since now all relaxed
// stores block release sequences. The exception for same-thread
Ok(old)
}
+ /// Perform an atomic min/max operation on a memory place with a new
+ /// scalar value; the old value is returned.
+ fn atomic_min_max_scalar(
+ &mut self,
+ place: &MPlaceTy<'tcx, Tag>,
+ rhs: ImmTy<'tcx, Tag>,
+ min: bool,
+ atomic: AtomicRwOp,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
+ let this = self.eval_context_mut();
+
+ let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
+ // `lt` is true iff the current value is strictly less than `rhs`.
+ let lt = this.overflowing_binary_op(mir::BinOp::Lt, &old, &rhs)?.0.to_bool()?;
+
+ // Keep the smaller of the two for `min`, the larger for `max`.
+ let new_val = if min {
+ if lt { &old } else { &rhs }
+ } else {
+ if lt { &rhs } else { &old }
+ };
+
+ this.allow_data_races_mut(|this| this.write_immediate_to_mplace(**new_val, place))?;
+
+ // Data-race validation of the RMW happens after the write-back.
+ this.validate_atomic_rmw(&place, atomic)?;
+
+ // Return the old value.
+ Ok(old)
+ }
+
/// Perform an atomic compare and exchange at a given memory location.
/// On success an atomic RMW operation is performed and on failure
/// only an atomic read occurs. If `can_fail_spuriously` is true,
// Either Release | AcqRel | SeqCst
clocks.apply_release_fence();
}
-
+
// Increment timestamp in case of release semantics.
Ok(atomic != AtomicFenceOp::Acquire)
})
}
}
- fn reset_vector_clocks(
- &mut self,
- ptr: Pointer<Tag>,
- size: Size
- ) -> InterpResult<'tcx> {
+ fn reset_vector_clocks(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(data_race) = &mut this.memory.extra.data_race {
if data_race.multi_threaded.get() {
- let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+ let alloc_meta =
+ this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
alloc_meta.reset_clocks(ptr.offset, size);
}
}
impl VClockAlloc {
/// Create a new data-race detector for newly allocated memory.
- pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
+ pub fn new_allocation(
+ global: &MemoryExtra,
+ len: Size,
+ kind: MemoryKind<MiriMemoryKind>,
+ ) -> VClockAlloc {
let (alloc_timestamp, alloc_index) = match kind {
// User allocated and stack memory should track allocation.
MemoryKind::Machine(
- MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
- ) | MemoryKind::Stack => {
+ MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap,
+ )
+ | MemoryKind::Stack => {
let (alloc_index, clocks) = global.current_thread_state();
let alloc_timestamp = clocks.clock[alloc_index];
(alloc_timestamp, alloc_index)
}
// Other global memory should trace races but be allocated at the 0 timestamp.
MemoryKind::Machine(
- MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
- MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
- ) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
- (0, VectorIdx::MAX_INDEX)
- }
+ MiriMemoryKind::Global
+ | MiriMemoryKind::Machine
+ | MiriMemoryKind::Env
+ | MiriMemoryKind::ExternStatic
+ | MiriMemoryKind::Tls,
+ )
+ | MemoryKind::CallerLocation
+ | MemoryKind::Vtable => (0, VectorIdx::MAX_INDEX),
};
VClockAlloc {
global: Rc::clone(global),
alloc_ranges: RefCell::new(RangeMap::new(
- len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+ len,
+ MemoryCellClocks::new(alloc_timestamp, alloc_index),
)),
}
}
true,
place_ptr,
size,
- ).map(|_| true);
+ )
+ .map(|_| true);
}
}
.as_ref()
.expect("Joined with thread but thread has not terminated");
-
// The join thread happens-before the current thread
// so update the current vector clock.
// Is not a release operation so the clock is not incremented.