//! Implementation of a data-race detector using Lamport Timestamps / Vector-clocks
//! based on the Dynamic Race Detection for C++ paper:
//! <https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf>
//! which does not report false positives when fences are used, and gives better
//! accuracy in the presence of read-modify-write operations.
//!
//! The implementation contains modifications to correctly model the changes to the C++20 memory model
//! regarding the weakening of release sequences: <http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html>.
//! Relaxed stores now unconditionally block all currently active release sequences, so per-thread tracking of release
//! sequences is not needed.
//!
//! The implementation also models races with memory allocation and deallocation by treating allocation and
//! deallocation as a type of write internally for detecting data-races.
//!
//! Weak memory orders are explored but not all weak behaviours are exhibited, so it can still miss data-races
//! but should not report false positives.
//!
//! Data-race definition from (<https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races>):
//! a data race occurs between two memory accesses if they are on different threads, at least one operation
//! is non-atomic, at least one operation is a write, and neither access happens-before the other. Read the link
//! for the full definition.
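//!
//! For example, the following (illustrative) program contains a data race under this
//! definition: both accesses touch the same location, both are non-atomic, one is a
//! write, and neither happens-before the other:
//! ```rust,ignore
//! static mut X: u32 = 0;
//! let t = std::thread::spawn(|| unsafe { X = 1 }); // non-atomic write
//! let _r = unsafe { X };                           // racing non-atomic read
//! t.join().unwrap();
//! ```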
//!
//! This re-uses vector indexes for threads that are known to be unable to report data-races; this is valid
//! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
//! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
//! on are not considered. Since the thread's vector clock will only increase and a data-race implies that
//! there is some index x where `clock[x] > thread_clock`, when this is true `clock[candidate-idx] > thread_clock`
//! can never hold and hence a data-race can never be reported in that vector index again.
//! This means that the thread-index can be safely re-used, starting on the next timestamp for the newly created
//! thread.
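//!
//! As a concrete (illustrative) instance: if a candidate index `c` reached its final
//! timestamp 5, re-use is only permitted once every live thread's clock already satisfies
//! `clock[c] >= 5`; from then on `clock[c] > thread_clock[c]` can never hold for any later
//! access, so no data-race can ever be reported against index `c` again.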
//!
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
//! followed by a single atomic or concurrent operation a single timestamp.
//! E.g. a sequence Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
//! This is because extra increment operations between the operations in the sequence are not
//! required for accurate reporting of data-race values.
//!
//! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
//! code, some atomic operations may increment the timestamp when not strictly necessary, but this has no effect
//! on the data-race detection code.
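//!
//! An illustrative sketch of how the clocks catch a race (indices and timestamp values
//! are invented for the example):
//! ```text
//! Thread A (index 0)                    Thread B (index 1)
//! write x       (recorded as [1, 0])
//!                                       read x      (B's clock is [0, 1])
//! ```
//! The write was recorded with timestamp 1 at index 0, but B's clock has only 0 at
//! index 0, so the write does not happen-before the read: a data race is reported.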
use std::{
    cell::{Cell, Ref, RefCell, RefMut},
    fmt::Debug,
    mem,
};

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir;
use rustc_target::abi::{Align, Size};

use super::{
    vector_clock::{VClock, VTimestamp, VectorIdx},
    weak_memory::EvalContextExt as _,
};

use crate::*;
pub type AllocExtra = VClockAlloc;

/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOrd {

/// Valid atomic read orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOrd {

/// Valid atomic write orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOrd {

/// Valid atomic fence orderings, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOrd {
/// The current set of vector clocks describing the state
/// of a thread: contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    /// The increasing clock representing timestamps
    /// that happen-before this thread.
    pub(super) clock: VClock,

    /// The set of timestamps that will happen-before this
    /// thread once it performs an acquire fence.
    fence_acquire: VClock,

    /// The last timestamp of happens-before relations that
    /// have been released by this thread by a fence.
    fence_release: VClock,

    /// Timestamps of the last SC fence performed by each
    /// thread, updated when this thread performs an SC fence.
    pub(super) fence_seqcst: VClock,

    /// Timestamps of the last SC write performed by each
    /// thread, updated when this thread performs an SC fence.
    pub(super) write_seqcst: VClock,

    /// Timestamps of the last SC fence performed by each
    /// thread, updated when this thread performs an SC read.
    pub(super) read_seqcst: VClock,
}
impl ThreadClockSet {
    /// Apply the effects of a release fence to this
    /// set of thread vector clocks.
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    /// Apply the effects of an acquire fence to this
    /// set of thread vector clocks.
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a
    /// known index.
    fn increment_clock(&mut self, index: VectorIdx) {
        self.clock.increment_index(index);
    }

    /// Join the happens-before clock with that of
    /// another thread, used to model thread join
    /// operations.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
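
    // Illustrative sketch (not part of the detector) of how the fence hooks above
    // combine with the per-location `sync_vector` to create a happens-before edge;
    // thread names are invented for the example:
    //
    //   Thread A: writes data, calls `a.apply_release_fence()`, then performs a
    //             relaxed store, which publishes a copy of `a.fence_release`.
    //   Thread B: performs a relaxed load of that location, joining the published
    //             clock into `b.fence_acquire`, then calls `b.apply_acquire_fence()`,
    //             which joins it into `b.clock`.
    //
    // Afterwards `a.fence_release <= b.clock`, so A's writes before its release fence
    // happen-before everything B does after its acquire fence.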
}

/// Error returned by finding a data race;
/// should be elaborated upon.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DataRace;

/// Externally stored memory cell clocks,
/// kept separate to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Default, Debug)]
struct AtomicMemoryCellClocks {
    /// The clock-vector of the timestamp of the last atomic
    /// read operation performed by each thread.
    /// This detects potential data-races between atomic read
    /// and non-atomic write operations.
    read_vector: VClock,

    /// The clock-vector of the timestamp of the last atomic
    /// write operation performed by each thread.
    /// This detects potential data-races between atomic write
    /// and non-atomic read or write operations.
    write_vector: VClock,

    /// Synchronization vector for acquire-release semantics;
    /// contains the vector of timestamps that will
    /// happen-before a thread if an acquire-load is
    /// performed on the data.
    sync_vector: VClock,
}

/// Type of write operation: allocating memory,
/// non-atomic writes, and deallocating memory
/// are all treated as writes for the purpose
/// of the data-race detector.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteType {
    /// Allocate memory.
    Allocate,

    /// Standard unsynchronized write.
    Write,

    /// Deallocate memory.
    /// Note that when memory is deallocated first, later non-atomic accesses
    /// will be reported as use-after-free, not as data races.
    /// (Same for `Allocate` above.)
    Deallocate,
}
impl WriteType {
    fn get_descriptor(self) -> &'static str {
        match self {
            WriteType::Allocate => "Allocate",
            WriteType::Write => "Write",
            WriteType::Deallocate => "Deallocate",
/// Memory cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// The vector-clock timestamp of the last write,
    /// corresponding to the writing thread's timestamp.
    write: VTimestamp,

    /// The identifier of the vector index, corresponding to a thread
    /// that performed the last write operation.
    write_index: VectorIdx,

    /// The type of operation that the write index represents,
    /// either newly allocated memory, a non-atomic write or
    /// a deallocation of memory.
    write_type: WriteType,

    /// The vector-clock of the timestamp of the last read operation
    /// performed by a thread since the last write operation occurred.
    /// It is reset to zero on each write operation.
    read: VClock,

    /// Atomic acquire & release sequence tracking clocks.
    /// For non-atomic memory in the common case this
    /// value is set to None.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
impl MemoryCellClocks {
    /// Create a new set of clocks representing memory allocated
    /// at a given vector timestamp and index.
    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        Self {
            read: VClock::default(),
            write: alloc,
            write_index: alloc_index,
            write_type: WriteType::Allocate,
            atomic_ops: None,

    /// Load the internal atomic memory cells if they exist.
    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
        self.atomic_ops.as_deref()

    /// Load or create the internal atomic memory metadata
    /// if it does not exist.
    fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.get_or_insert_with(Default::default)
    /// Update memory cell data-race tracking for atomic
    /// load acquire semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn load_acquire(
        &mut self,
        clocks: &mut ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(clocks, index)?;
        if let Some(atomic) = self.atomic() {
            clocks.clock.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Checks if the memory cell access is ordered with all prior atomic reads and writes.
    fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
        if let Some(atomic) = self.atomic() {
            atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
        } else {
            true
        }
    }
    /// Update memory cell data-race tracking for atomic
    /// load relaxed semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn load_relaxed(
        &mut self,
        clocks: &mut ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(clocks, index)?;
        if let Some(atomic) = self.atomic() {
            clocks.fence_acquire.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Update the memory cell data-race tracking for atomic
    /// store release semantics.
    fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.clone_from(&clocks.clock);
        Ok(())
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics.
    fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;

        // The handling of release sequences was changed in C++20 and so
        // the code here is different from the paper, since now all relaxed
        // stores block release sequences. The exception for same-thread
        // relaxed stores has been removed.
        let atomic = self.atomic_mut();
        atomic.sync_vector.clone_from(&clocks.fence_release);
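
        // Illustrative consequence (invented example, C++20 semantics as modelled here):
        // after
        //     a.store(1, Release);  // by thread A
        //     a.store(2, Relaxed);  // by any thread, even A itself
        // an acquire-load that reads 2 synchronizes only with what A published via its
        // last release *fence* (`fence_release`), not with the full clock of the
        // Release store, because the relaxed store overwrote `sync_vector` above.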
    /// Update the memory cell data-race tracking for atomic
    /// store release semantics for RMW operations.
    fn rmw_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.clock);
        Ok(())
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics for RMW operations.
    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.fence_release);
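
    // Note (design observation): unlike `store_release`/`store_relaxed`, which
    // *overwrite* `sync_vector` via `clone_from`, the two RMW variants above *join*
    // into it. An RMW therefore extends a release sequence already headed at this
    // location instead of replacing it, matching the C++20 rule that read-modify-write
    // operations continue a release sequence.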
    /// Detect data-races with an atomic read, caused by a non-atomic write that does
    /// not happen-before the atomic read.
    fn atomic_read_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] {
            let atomic = self.atomic_mut();
            atomic.read_vector.set_at_index(&clocks.clock, index);
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    /// Detect data-races with an atomic write, either with a non-atomic read or with
    /// a non-atomic write.
    fn atomic_write_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
            let atomic = self.atomic_mut();
            atomic.write_vector.set_at_index(&clocks.clock, index);
            Ok(())
        } else {
            Err(DataRace)
        }
    }
    /// Detect races for non-atomic read operations at the current memory cell;
    /// returns an error if a data-race is detected.
    fn read_race_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= clocks.clock
            } else {
                true
            };
            if race_free {
                self.read.set_at_index(&clocks.clock, index);
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }

    /// Detect races for non-atomic write operations at the current memory cell;
    /// returns an error if a data-race is detected.
    fn write_race_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
        write_type: WriteType,
    ) -> Result<(), DataRace> {
        log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= clocks.clock && atomic.read_vector <= clocks.clock
            } else {
                true
            };
            if race_free {
                self.write = clocks.clock[index];
                self.write_index = index;
                self.write_type = write_type;
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
    /// Perform an atomic read operation at the memory location.
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx, Scalar<Provenance>> {
        let this = self.eval_context_ref();
        this.atomic_access_check(place)?;
        // This will read from the last store in the modification order of this location. In case
        // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
        // This is fine with Stacked Borrows and race checks because they don't concern metadata on
        // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
        // Only metadata on the location itself is used.
        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
        this.validate_overlapping_atomic(place)?;
        this.buffered_atomic_read(place, atomic, scalar, || {
            this.validate_atomic_load(place, atomic)
        })
    /// Perform an atomic write operation at the memory location.
    fn write_scalar_atomic(
        &mut self,
        val: Scalar<Provenance>,
        dest: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.atomic_access_check(dest)?;

        this.validate_overlapping_atomic(dest)?;
        this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
        this.validate_atomic_store(dest, atomic)?;
        // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
        // side effects from a read the program did not perform. So we have to initialise
        // the store buffer with the value currently being written.
        // ONCE this is fixed please remove the hack in buffered_atomic_write() in weak_memory.rs
        // <https://github.com/rust-lang/miri/issues/2164>
        this.buffered_atomic_write(val, dest, atomic, val)
    /// Perform an atomic operation on a memory location.
    fn atomic_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        rhs: &ImmTy<'tcx, Provenance>,
        op: mir::BinOp,
        neg: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place)?;

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;

        // Atomics wrap around on overflow.
        let val = this.binary_op(op, &old, rhs)?;
        let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
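        // (`neg` is set for fetch_nand-style operations, which negate the result of the
        // binary op; e.g. a fetch_add arrives here with `op = Add` and `neg = false`.)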
        this.allow_data_races_mut(|this| this.write_immediate(*val, &place.into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(val.to_scalar(), place, atomic, old.to_scalar())?;
        Ok(old)
    }

    /// Perform an atomic exchange with a memory place and a new
    /// scalar value; the old value is returned.
    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        new: Scalar<Provenance>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, Scalar<Provenance>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place)?;

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        Ok(old)
    }
    /// Perform a conditional atomic exchange with a memory place and a new
    /// scalar value; the old value is returned.
    fn atomic_min_max_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        rhs: ImmTy<'tcx, Provenance>,
        min: bool,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
        let this = self.eval_context_mut();
        this.atomic_access_check(place)?;

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;

        let new_val = if min {
            if lt { &old } else { &rhs }
        } else {
            if lt { &rhs } else { &old }
        };

        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &place.into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new_val.to_scalar(), place, atomic, old.to_scalar())?;

        // Return the old value.
        Ok(old)
    }
    /// Perform an atomic compare and exchange at a given memory location.
    /// On success an atomic RMW operation is performed and on failure
    /// only an atomic read occurs. If `can_fail_spuriously` is true,
    /// then we treat it as a "compare_exchange_weak" operation, and
    /// some portion of the time fail even when the values are actually
    /// identical.
    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        expect_old: &ImmTy<'tcx, Provenance>,
        new: Scalar<Provenance>,
        success: AtomicRwOrd,
        fail: AtomicReadOrd,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Provenance>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();
        this.atomic_access_check(place)?;

        this.validate_overlapping_atomic(place)?;
        // Failure ordering cannot be stronger than success ordering, therefore first attempt
        // to read with the failure ordering, and if successful then try again with the success
        // read ordering and write in the success case.
        // Read as immediate for the sake of `binary_op()`.
        let old = this.allow_data_races_mut(|this| this.read_immediate(&(place.into())))?;
        // `binary_op` will bail if either of them is not a scalar.
        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        // If the operation would succeed, but is "weak", fail some portion
        // of the time, based on `success_rate`.
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
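        // E.g. (illustrative numbers) with `cmpxchg_weak_failure_rate = 0.8`, a weak
        // compare-exchange whose comparison succeeds is still reported as failed 80%
        // of the time, modelling the spurious failures `compare_exchange_weak` permits.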
        let cmpxchg_success = eq.to_scalar().to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().gen_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));

        // Update ptr depending on comparison.
        // If successful, perform a full rw-atomic validation,
        // otherwise treat this as an atomic load with the fail ordering.
        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
        } else {
            this.validate_atomic_load(place, fail)?;
            // A failed compare exchange is equivalent to a load, reading from the latest store
            // in the modification order.
            // Since `old` is only a value and not the store element, we need to separately
            // find it in our store buffer and perform load_impl on it.
            this.perform_read_on_buffered_latest(place, fail, old.to_scalar())?;
        }

        // Return the old value.
        Ok(res)
    }
    /// Update the data-race detector for an atomic fence on the current thread.
    fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(data_race) = &mut this.machine.data_race {
            data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| {
                log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);

                // Apply data-race detection for the current fences;
                // this treats AcqRel and SeqCst as the same as an acquire
                // and release fence applied in the same timestamp.
                if atomic != AtomicFenceOrd::Release {
                    // Either Acquire | AcqRel | SeqCst
                    clocks.apply_acquire_fence();
                }
                if atomic != AtomicFenceOrd::Acquire {
                    // Either Release | AcqRel | SeqCst
                    clocks.apply_release_fence();
                }
                if atomic == AtomicFenceOrd::SeqCst {
                    data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
                    clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
                    clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
                }

                // Increment timestamp in case of release semantics.
                Ok(atomic != AtomicFenceOrd::Acquire)
            })
        } else {
            Ok(())
        }
    }
    /// After all threads are done running, this allows data races to occur for subsequent
    /// 'administrative' machine accesses (that logically happen outside of the Abstract Machine).
    fn allow_data_races_all_threads_done(&mut self) {
        let this = self.eval_context_ref();
        assert!(this.have_all_terminated());
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
    }
}

/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    /// Assigning each byte a `MemoryCellClocks`.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
}

impl VisitTags for VClockAlloc {
    fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
        // We don't have any tags.
    }
}
impl VClockAlloc {
    /// Create a new data-race detector for newly allocated memory.
    pub fn new_allocation(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
        len: Size,
        kind: MemoryKind<MiriMemoryKind>,
    ) -> VClockAlloc {
        let (alloc_timestamp, alloc_index) = match kind {
            // User allocated and stack memory should track allocation.
            MemoryKind::Machine(
                MiriMemoryKind::Rust
                | MiriMemoryKind::Miri
                | MiriMemoryKind::C
                | MiriMemoryKind::WinHeap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.current_thread_state(thread_mgr);
                let alloc_timestamp = clocks.clock[alloc_index];
                (alloc_timestamp, alloc_index)
            }
            // Other global memory should also track races, but is allocated at the 0 timestamp.
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation => (0, VectorIdx::MAX_INDEX),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(RangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }
    // Find an index, if one exists, where the value
    // in `l` is greater than the value in `r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        log::trace!("Find index where not {:?} <= {:?}", l, r);
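        // E.g. for `l = [3, 1]` and `r = [2, 5]` this returns `Some(0)`: timestamps
        // are compared pointwise and 3 > 2 at index 0 (values invented for the example).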
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    // By invariant, if l_slice is longer
                    // then one element must be larger.
                    // This just validates that this is true
                    // and reports earlier elements first.
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
    /// Report a data-race found in the program.
    /// This finds the two racing threads and the type
    /// of data-race that occurred. This will also
    /// return info about the memory location the data-race
    /// occurred in.
    fn report_data_race<'tcx>(
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
        range: &MemoryCellClocks,
        action: &str,
        is_atomic: bool,
        ptr_dbg: Pointer<AllocId>,
    ) -> InterpResult<'tcx> {
        let (current_index, current_clocks) = global.current_thread_state(thread_mgr);
        let mut write_clock;
        let (other_action, other_thread, _other_clock) = if range.write
            > current_clocks.clock[range.write_index]
        {
            // Convert the write action into the vector clock it
            // represents for diagnostic purposes.
            write_clock = VClock::new_with_index(range.write_index, range.write);
            (range.write_type.get_descriptor(), range.write_index, &write_clock)
        } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
            ("Read", idx, &range.read)
        } else if !is_atomic {
            if let Some(atomic) = range.atomic() {
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                {
                    ("Atomic Store", idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
                {
                    ("Atomic Load", idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for non-atomic operation: no race found"
                    )
                }
            } else {
                unreachable!(
                    "Failed to report data-race for non-atomic operation: no atomic component"
                )
            }
        } else {
            unreachable!("Failed to report data-race for atomic operation")
        };

        // Load elaborated thread information about the racing thread actions.
        let current_thread_info = global.print_thread_metadata(thread_mgr, current_index);
        let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);

        // Throw the data-race detection error.
        throw_ub_format!(
            "Data race detected between {} on {} and {} on {} at {:?}",
            other_action, other_thread_info, action, current_thread_info, ptr_dbg
        )
    }
    /// Detect racing atomic reads and writes (not data races)
    /// on every byte of the current access range.
    pub(super) fn race_free_with_atomic(
        &self,
        range: AllocRange,
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> bool {
        if global.race_detecting() {
            let (_, clocks) = global.current_thread_state(thread_mgr);
            let alloc_ranges = self.alloc_ranges.borrow();
            for (_, range) in alloc_ranges.iter(range.start, range.size) {
                if !range.race_free_with_atomic(&clocks) {
                    return false;
                }
            }
        }
        true
    }
    /// Detect data-races for an unsynchronized read operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation for which data-race detection is handled separately, for example
    /// atomic read operations.
    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> InterpResult<'tcx> {
        if global.race_detecting() {
            let (index, clocks) = global.current_thread_state(thread_mgr);
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
            for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.read_race_detect(&clocks, index) {
                    // Report data-race.
                    return Self::report_data_race(
                        global,
                        thread_mgr,
                        range,
                        "Read",
                        false,
                        Pointer::new(alloc_id, offset),
                    );
                }
            }
            Ok(())
        } else {
            Ok(())
        }
    }
    // Shared code for detecting data-races on unique access to a section of memory.
    fn unique_access<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        write_type: WriteType,
        global: &mut GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> InterpResult<'tcx> {
        if global.race_detecting() {
            let (index, clocks) = global.current_thread_state(thread_mgr);
            for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.write_race_detect(&clocks, index, write_type) {
                    // Report data-race.
                    return Self::report_data_race(
                        global,
                        thread_mgr,
                        range,
                        write_type.get_descriptor(),
                        false,
                        Pointer::new(alloc_id, offset),
                    );
                }
            }
            Ok(())
        } else {
            Ok(())
        }
    }
    /// Detect data-races for an unsynchronized write operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation.
    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &mut GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> InterpResult<'tcx> {
        self.unique_access(alloc_id, range, WriteType::Write, global, thread_mgr)
    }

    /// Detect data-races for an unsynchronized deallocate operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation.
    pub fn deallocate<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &mut GlobalState,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> InterpResult<'tcx> {
        self.unique_access(alloc_id, range, WriteType::Deallocate, global, thread_mgr)
    }
}
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
    /// Temporarily allow data-races to occur. This should only be used in
    /// one of these cases:
    /// - One of the appropriate `validate_atomic` functions will be called
    ///   to treat a memory access as atomic.
    /// - The memory being accessed should be treated as internal state, that
    ///   cannot be accessed by the interpreted program.
    /// - Execution of the interpreted program has halted.
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'mir, 'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    /// Same as `allow_data_races_ref`, this temporarily disables any data-race detection and
    /// so should only be used for atomic operations or internal state that the program cannot
    /// access.
    fn allow_data_races_mut<R>(
        &mut self,
        op: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> R,
    ) -> R {
        let this = self.eval_context_mut();
        if let Some(data_race) = &this.machine.data_race {
            let old = data_race.ongoing_action_data_race_free.replace(true);
            assert!(!old, "cannot nest allow_data_races");
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    /// Checks that an atomic access is legal at the given place.
    fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        // Check alignment requirements. Atomics must always be aligned to their size,
        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
        // be 8-aligned).
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.check_ptr_access_align(
            place.ptr,
            place.layout.size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
        )?;
        // Ensure the allocation is mutable. Even a failing (read-only) compare_exchange needs mutable
        // memory on many targets (i.e., they segfault if that memory is mapped read-only), and
        // atomic loads can be implemented via compare_exchange on some targets. There could
        // possibly be some very specific exceptions to this, see
        // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
        // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
        // access will happen later.
        let (alloc_id, _offset, _prov) =
            this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
        if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
            // FIXME: make this prettier, once these messages have separate title/span/help messages.
            throw_ub_format!(
                "atomic operations cannot be performed on read-only memory\n\
                many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails \
                (and is hence nominally read-only)\n\
                some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; \
                it is possible that we could have an exception permitting this for specific kinds of loads\n\
                please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you"
            )
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicReadOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(
            place,
            atomic,
            "Atomic Load",
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOrd::Relaxed {
                    memory.load_relaxed(&mut *clocks, index)
                } else {
                    memory.load_acquire(&mut *clocks, index)
                }
            },
        )
    }
    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicWriteOrd,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(
            place,
            atomic,
            "Atomic Store",
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOrd::Relaxed {
                    memory.store_relaxed(clocks, index)
                } else {
                    memory.store_release(clocks, index)
                }
            },
        )
    }
    /// Update the data-race detector for an atomic read-modify-write occurring
    /// at the associated memory place and on the current thread.
    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: AtomicRwOrd,
    ) -> InterpResult<'tcx> {
        use AtomicRwOrd::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
            if acquire {
                memory.load_acquire(clocks, index)?;
            } else {
                memory.load_relaxed(clocks, index)?;
            }
            if release {
                memory.rmw_release(clocks, index)
            } else {
                memory.rmw_relaxed(clocks, index)
            }
        })
    }
    /// Generic atomic operation implementation.
    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx, Provenance>,
        atomic: A,
        description: &str,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            if data_race.race_detecting() {
                let size = place.layout.size;
                let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr)?;
                // Load and log the atomic operation.
                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
                log::trace!(
                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
                    description,
                    &atomic,
                    place.ptr,
                    size.bytes()
                );
                // Perform the atomic operation.
                data_race.maybe_perform_sync_operation(
                    &this.machine.threads,
                    |index, mut clocks| {
                        for (offset, range) in
                            alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                        {
                            if let Err(DataRace) = op(range, &mut clocks, index, atomic) {
                                mem::drop(clocks);
                                return VClockAlloc::report_data_race(
                                    data_race,
                                    &this.machine.threads,
                                    range,
                                    description,
                                    true,
                                    Pointer::new(alloc_id, offset),
                                );
                            }
                        }

                        // This conservatively assumes all operations have release semantics.
                        Ok(true)
                    },
                )?;

                // Log changes to atomic memory.
                if log::log_enabled!(log::Level::Trace) {
                    for (_offset, range) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                    {
                        log::trace!(
                            "Updated atomic memory({:?}, size={}) to {:#?}",
                            place.ptr,
                            size.bytes(),
                            range.atomic()
                        );
                    }
                }
            }
        }
        Ok(())
    }
}

/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector index currently in use by the
    /// thread; this is set to None
    /// after the vector index has been re-used
    /// and hence the value will never need to be
    /// read during data-race reporting.
    vector_index: Option<VectorIdx>,

    /// Thread termination vector clock; this
    /// is set on thread termination and is used
    /// for joining on threads since the vector_index
    /// may be re-used when the join operation occurs.
    termination_vector_clock: Option<VClock>,
}
/// Global data-race detection state, contains the currently
/// executing thread as well as the vector clocks associated
/// with each of the threads.
// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set to true once the first additional
    /// thread has launched, due to the dependency
    /// between before and after a thread launch.
    /// Any data-races must be recorded after this
    /// so concurrent execution can ignore recording
    /// any data-races.
    multi_threaded: Cell<bool>,

    /// A flag to mark we are currently performing
    /// a data-race free action (such as atomic access)
    /// to suppress the race detector.
    ongoing_action_data_race_free: Cell<bool>,

    /// Mapping of a vector index to a known set of thread
    /// clocks; this is not directly mapping from a thread id
    /// since it may refer to multiple threads.
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// Mapping of a given vector index to the current thread
    /// that the execution is representing; this may change
    /// if a vector index is re-assigned to a new thread.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// The mapping of a given thread to associated thread metadata.
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// Potential vector indices that could be re-used on thread creation;
    /// values are inserted here after the thread has terminated and
    /// been joined with, and hence may potentially become free
    /// for use as the index for a new thread.
    /// Elements in this set may still require the vector index to
    /// report data-races, and can only be re-used after all
    /// active vector clocks catch up with the thread's timestamp.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// This contains threads that have terminated, but not yet joined,
    /// and so cannot become re-use candidates until a join operation occurs.
    /// The associated vector index will be moved into re-use candidates
    /// after the join operation occurs.
    terminated_threads: RefCell<FxHashMap<ThreadId, VectorIdx>>,

    /// The timestamp of the last SC fence performed by each thread.
    last_sc_fence: RefCell<VClock>,

    /// The timestamp of the last SC write performed by each thread.
    last_sc_write: RefCell<VClock>,

    /// Track when an outdated (weak memory) load happens.
    pub track_outdated_loads: bool,
}
impl VisitTags for GlobalState {
    fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
        // We don't have any tags.
    }
}

impl GlobalState {
    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new(config: &MiriConfig) -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            terminated_threads: RefCell::new(FxHashMap::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write: RefCell::new(VClock::default()),
            track_outdated_loads: config.track_outdated_loads,
        };

        // Setup the main-thread since it is not explicitly created:
        // uses vector index and thread-id 0.
        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::new(0));
        global_state
            .thread_info
            .get_mut()
            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });

        global_state
    }
    // We perform data race detection when there are more than 1 active thread
    // and we have not temporarily disabled race detection to perform something
    // data-race free.
    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }
    // Try to find vector index values that can potentially be re-used
    // by a new thread instead of a new vector index being created.
    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        let vector_info = self.vector_info.borrow();
        let terminated_threads = self.terminated_threads.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                // The thread happens before the clock, and hence cannot report
                // a data-race with the candidate index.
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                // The vector represents a thread that has terminated and hence cannot
                // report a data-race with the candidate index.
                let thread_id = vector_info[clock_idx];
                let vector_terminated =
                    reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);

                // The vector index cannot report a race with the candidate index
                // and hence allows the candidate index to be re-used.
                no_data_race || vector_terminated
            }) {
                // All vector clocks for each vector index have caught up with
                // the target timestamp, and the thread is known to have
                // terminated, therefore this vector clock index cannot
                // report any more data-races.
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }
    // Hook for thread creation: enables multi-threaded execution and marks
    // the current thread timestamp as happening-before the new thread.
    pub fn thread_created(&mut self, thread_mgr: &ThreadManager<'_, '_>, thread: ThreadId) {
        let current_index = self.current_index(thread_mgr);

        // Enable multi-threaded execution, there are now at least two threads
        // so data-races are now possible.
        self.multi_threaded.set(true);

        // Load and setup the associated thread metadata.
        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        // Assign a vector index for the thread, attempting to re-use an old
        // vector index that can no longer report any data-races if possible.
        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            // Now re-configure the re-use candidate, increment the clock
            // for the new sync use of the vector.
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index);

            // Locate the old thread the vector was associated with and update
            // it to represent the new thread instead.
            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            // Mark the thread the vector index was associated with as no longer
            // representing a thread index.
            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            // No vector re-use candidates available, instead create
            // a new vector index.
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        // Mark the chosen vector index as in use by the thread.
        thread_info[thread].vector_index = Some(created_index);

        // Create a thread clock set if applicable.
        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        // Now load the two clocks and configure the initial state.
        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        // Join the created with current, since the current thread's
        // previous actions happen-before the created thread.
        created.join_with(current);

        // Advance both threads after the synchronized operation.
        // Both operations are considered to have release semantics.
        current.increment_clock(current_index);
        created.increment_clock(created_index);
    }
    /// Hook on a thread join to update the implicit happens-before relation between the joined
    /// thread (the joinee, the thread that someone waited on) and the current thread (the joiner,
    /// the thread who was waiting).
    pub fn thread_joined(
        &mut self,
        thread_mgr: &ThreadManager<'_, '_>,
        joiner: ThreadId,
        joinee: ThreadId,
    ) {
        let clocks_vec = self.vector_clocks.get_mut();
        let thread_info = self.thread_info.get_mut();

        // Load the vector clock of the current thread.
        let current_index = thread_info[joiner]
            .vector_index
            .expect("Performed thread join on thread with no assigned vector");
        let current = &mut clocks_vec[current_index];

        // Load the associated vector clock for the terminated thread.
        let join_clock = thread_info[joinee]
            .termination_vector_clock
            .as_ref()
            .expect("Joined with thread but thread has not terminated");

        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        // This is not a release operation, so the clock is not incremented.
        current.clock.join(join_clock);

        // Check the number of live threads; if the value is 1
        // then test for potentially disabling multi-threaded execution.
        if thread_mgr.get_live_thread_count() == 1 {
            // May potentially be able to disable multi-threaded execution.
            let current_clock = &clocks_vec[current_index];
            if clocks_vec
                .iter_enumerated()
                .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
            {
                // All thread terminations happen-before the current clock,
                // therefore no data-races can be reported until a new thread
                // is created, so disable multi-threaded execution.
                self.multi_threaded.set(false);
            }
        }

        // If the thread is marked as terminated but not joined
        // then move the thread to the re-use set.
        let termination = self.terminated_threads.get_mut();
        if let Some(index) = termination.remove(&joinee) {
            let reuse = self.reuse_candidates.get_mut();
            reuse.insert(index);
        }
    }
    /// On thread termination, the vector clock may be re-used
    /// in the future once all remaining thread-clocks catch
    /// up with the timestamp of the terminated thread.
    /// This assigns thread termination a unique timestamp,
    /// which will be used to join the thread.
    /// This should be called strictly before any calls to
    /// `thread_joined`.
    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_, '_>) {
        let current_index = self.current_index(thread_mgr);

        // Increment the clock to a unique termination timestamp.
        let vector_clocks = self.vector_clocks.get_mut();
        let current_clocks = &mut vector_clocks[current_index];
        current_clocks.increment_clock(current_index);

        // Load the current thread id for the executing vector.
        let vector_info = self.vector_info.get_mut();
        let current_thread = vector_info[current_index];

        // Load the current thread metadata, and move to a terminated
        // vector state. Setting up the vector clock that all join operations
        // will use.
        let thread_info = self.thread_info.get_mut();
        let current = &mut thread_info[current_thread];
        current.termination_vector_clock = Some(current_clocks.clock.clone());

        // Add this thread as a candidate for re-use
        // after a thread join occurs.
        let termination = self.terminated_threads.get_mut();
        termination.insert(current_thread, current_index);
    }
    /// Attempt to perform a synchronized operation; this
    /// will perform no operation if multi-threading is
    /// not currently enabled.
    /// Otherwise it will call `op` and, if the operation
    /// performed a release (returned true), increment the
    /// clock for the current vector to separate the operation
    /// from any later happens-before edges it may create.
    fn maybe_perform_sync_operation<'tcx>(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.current_thread_state_mut(thread_mgr);
            if op(index, clocks)? {
                let (_, mut clocks) = self.current_thread_state_mut(thread_mgr);
                clocks.increment_clock(index);
            }
        }
        Ok(())
    }
    /// Internal utility to identify a thread stored internally;
    /// returns the id and the name for better diagnostics.
    fn print_thread_metadata(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
        vector: VectorIdx,
    ) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = thread_mgr.get_thread_name(thread);
        format!("thread `{}`", String::from_utf8_lossy(thread_name))
    }
    /// Acquire a lock, express that the previous call of
    /// `validate_lock_release` must happen before this.
    /// As this is an acquire operation, the thread timestamp is not incremented.
    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
        let (_, mut clocks) = self.load_thread_state_mut(thread);
        clocks.clock.join(lock);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    /// For normal locks this should be equivalent to `validate_lock_release_shared`
    /// since an acquire operation should have occurred before; however,
    /// for futex & condvar operations this is not the case and this
    /// operation must be used.
    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.clone_from(&clocks.clock);
        clocks.increment_clock(index);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    /// For normal locks this should be equivalent to `validate_lock_release`.
    /// This function only exists for joining over the set of concurrent readers
    /// in a read-write lock and should not be used for anything else.
    pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.join(&clocks.clock);
        clocks.increment_clock(index);
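
    // Illustrative use (invented scenario): a read-write lock with two concurrent
    // readers calls `validate_lock_release_shared` once per reader, so the lock clock
    // becomes the *join* of both readers' clocks; a writer's later
    // `validate_lock_acquire` then happens-after both readers. Plain
    // `validate_lock_release` would instead overwrite the clock, losing one
    // reader's history.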
    /// Load the vector index used by the given thread as well as the set of vector clocks
    /// used by the thread.
    fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_info.borrow()[thread]
            .vector_index
            .expect("Loading thread state for thread with no assigned vector");
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector.
    pub(super) fn current_thread_state(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.current_index(thread_mgr);
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector mutably for modification.
    pub(super) fn current_thread_state_mut(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.current_index(thread_mgr);
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }
    /// Return the vector index of the current thread;
    /// should be the same as the data-race active thread.
    fn current_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx {
        let active_thread_id = thread_mgr.get_active_thread_id();
        self.thread_info.borrow()[active_thread_id]
            .vector_index
            .expect("active thread has no assigned vector")
    }

    // SC ATOMIC STORE rule in the paper.
    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_, '_>) {
        let (index, clocks) = self.current_thread_state(thread_mgr);
        self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
    }

    // SC ATOMIC READ rule in the paper.
    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_, '_>) {
        let (.., mut clocks) = self.current_thread_state_mut(thread_mgr);
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());