//! Implementation of a data-race detector using Lamport timestamps / vector clocks
//! based on the paper "Dynamic Race Detection for C++11":
//! <https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf>
//! which does not report false positives when fences are used, and gives better
//! accuracy in the presence of read-modify-write operations.
//!
//! The implementation contains modifications to correctly model the changes to the memory model in C++20
//! regarding the weakening of release sequences: <http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html>.
//! Relaxed stores now unconditionally block all currently active release sequences, so per-thread tracking of release
//! sequences is not needed.
//!
//! The implementation also models races with memory allocation and deallocation by treating allocation and
//! deallocation as a type of write internally for the purpose of detecting data-races.
//!
//! Weak memory orders are explored but not all weak behaviours are exhibited, so the detector can still miss data-races
//! but should not report false positives.
//!
//! Data-race definition from (<https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races>):
//! a data race occurs between two memory accesses if they are on different threads, at least one operation
//! is non-atomic, at least one operation is a write, and neither access happens-before the other. Read the link
//! for the full definition.
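//!
//! For illustration, a minimal program containing such a race under this definition
//! (a hypothetical test, not part of this module): both accesses are non-atomic,
//! one is a write, and no happens-before edge orders them.
//! ```ignore
//! static mut COUNTER: u32 = 0;
//!
//! fn main() {
//!     let t = std::thread::spawn(|| unsafe { COUNTER = 1 }); // non-atomic write
//!     let r = unsafe { COUNTER }; // non-atomic read, racing with the write above
//!     t.join().unwrap();
//!     println!("{}", r);
//! }
//! ```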
//!
//! The detector re-uses vector indexes for threads that are known to be unable to report data-races; this is valid
//! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
//! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
//! on are not considered. Since the thread's vector clock will only increase, and a data-race implies that
//! there is some index x where clock\[x\] > thread_clock, when this is true clock\[candidate-idx\] > thread_clock
//! can never hold and hence a data-race can never be reported in that vector index again.
//! This means that the thread-index can be safely re-used, starting on the next timestamp for the newly created
//! thread.
//!
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
//! followed by a single atomic or concurrent operation a single timestamp.
//! For example, Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
//! This is because extra increment operations between the operations in the sequence are not
//! required for accurate data-race reporting.
//!
//! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
//! code, some atomic operations may increment the timestamp when not strictly necessary, but this has no effect
//! on the data-race detection code.
//!
//! Currently we have our own local copy of the currently active thread index and names; this is due
//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra
//! read, write and deallocate functions, and should be cleaned up in the future.
use std::{
    cell::{Cell, Ref, RefCell, RefMut},
    fmt::Debug,
    mem,
};

use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::{mir, ty::layout::TyAndLayout};
use rustc_target::abi::Size;

use crate::*;

use super::weak_memory::EvalContextExt as _;
pub type AllocExtra = VClockAlloc;
/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRwOp {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Valid atomic read operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
    Relaxed,
    Acquire,
    SeqCst,
}

/// Valid atomic write operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
    Relaxed,
    Release,
    SeqCst,
}

/// Valid atomic fence operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
/// The current set of vector clocks describing the state
/// of a thread; it contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
pub(super) struct ThreadClockSet {
    /// The increasing clock representing timestamps
    /// that happen-before this thread.
    pub(super) clock: VClock,

    /// The set of timestamps that will happen-before this
    /// thread once it performs an acquire fence.
    fence_acquire: VClock,

    /// The last timestamp of happens-before relations that
    /// have been released by this thread by a fence.
    fence_release: VClock,

    /// Timestamps of the last SC fence performed by each
    /// thread, updated when this thread performs an SC fence.
    pub(super) fence_seqcst: VClock,

    /// Timestamps of the last SC write performed by each
    /// thread, updated when this thread performs an SC fence.
    pub(super) write_seqcst: VClock,

    /// Timestamps of the last SC fence performed by each
    /// thread, updated when this thread performs an SC read.
    pub(super) read_seqcst: VClock,
}
impl ThreadClockSet {
    /// Apply the effects of a release fence to this
    /// set of thread vector clocks.
    fn apply_release_fence(&mut self) {
        self.fence_release.clone_from(&self.clock);
    }

    /// Apply the effects of an acquire fence to this
    /// set of thread vector clocks.
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a
    /// known index.
    fn increment_clock(&mut self, index: VectorIdx) {
        self.clock.increment_index(index);
    }

    /// Join the happens-before clock with that of
    /// another thread, used to model thread join
    /// operations.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
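
// A minimal sketch (not part of the detector itself) of how thread spawning maps
// onto the clock operations above, mirroring `thread_created` further below: the
// child joins with the parent's clock, then both sides take fresh timestamps since
// spawning acts as a release operation on both threads.
#[allow(dead_code)]
fn model_spawn(
    parent: &mut ThreadClockSet,
    parent_idx: VectorIdx,
    child: &mut ThreadClockSet,
    child_idx: VectorIdx,
) {
    // The parent's previous actions happen-before the child's first action.
    child.join_with(parent);
    // Advance both clocks past the synchronization point.
    parent.increment_clock(parent_idx);
    child.increment_clock(child_idx);
}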
/// Error returned by finding a data race;
/// should be elaborated upon.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DataRace;
/// Externally stored memory cell clocks,
/// kept explicitly to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Default, Debug)]
struct AtomicMemoryCellClocks {
    /// The clock-vector of the timestamp of the last atomic
    /// read operation performed by each thread.
    /// This detects potential data-races between atomic read
    /// and non-atomic write operations.
    read_vector: VClock,

    /// The clock-vector of the timestamp of the last atomic
    /// write operation performed by each thread.
    /// This detects potential data-races between atomic write
    /// and non-atomic read or write operations.
    write_vector: VClock,

    /// Synchronization vector for acquire-release semantics;
    /// contains the vector of timestamps that will
    /// happen-before a thread if an acquire-load is
    /// performed on the data.
    sync_vector: VClock,
}
/// Type of write operation: allocating memory,
/// non-atomic writes and deallocating memory
/// are all treated as writes for the purpose
/// of the data-race detector.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteType {
    /// Allocate memory.
    Allocate,

    /// Standard unsynchronized write.
    Write,

    /// Deallocate memory.
    /// Note that when memory is deallocated first, later non-atomic accesses
    /// will be reported as use-after-free, not as data races.
    /// (Same for `Allocate` above.)
    Deallocate,
}

impl WriteType {
    fn get_descriptor(self) -> &'static str {
        match self {
            WriteType::Allocate => "Allocate",
            WriteType::Write => "Write",
            WriteType::Deallocate => "Deallocate",
        }
    }
}
/// Memory cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// The vector-clock timestamp of the last write,
    /// corresponding to the writing thread's timestamp.
    write: VTimestamp,

    /// The identifier of the vector index, corresponding to a thread
    /// that performed the last write operation.
    write_index: VectorIdx,

    /// The type of operation that the write index represents,
    /// either newly allocated memory, a non-atomic write or
    /// a deallocation of memory.
    write_type: WriteType,

    /// The vector-clock of the timestamp of the last read operation
    /// performed by a thread since the last write operation occurred.
    /// It is reset to zero on each write operation.
    read: VClock,

    /// Atomic acquire & release sequence tracking clocks.
    /// For non-atomic memory this is None in the common case.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
impl MemoryCellClocks {
    /// Create a new set of clocks representing memory allocated
    /// at a given vector timestamp and index.
    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: alloc,
            write_index: alloc_index,
            write_type: WriteType::Allocate,
            atomic_ops: None,
        }
    }

    /// Load the internal atomic memory cells if they exist.
    fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
        match &self.atomic_ops {
            Some(op) => Some(&*op),
            None => None,
        }
    }

    /// Load or create the internal atomic memory metadata
    /// if it does not exist.
    fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.get_or_insert_with(Default::default)
    }

    /// Update memory cell data-race tracking for atomic
    /// load acquire semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn load_acquire(
        &mut self,
        clocks: &mut ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(clocks, index)?;
        if let Some(atomic) = self.atomic() {
            clocks.clock.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Checks if the memory cell access is ordered with all prior atomic reads and writes.
    fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
        if let Some(atomic) = self.atomic() {
            atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
        } else {
            true
        }
    }

    /// Update memory cell data-race tracking for atomic
    /// load relaxed semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn load_relaxed(
        &mut self,
        clocks: &mut ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        self.atomic_read_detect(clocks, index)?;
        if let Some(atomic) = self.atomic() {
            clocks.fence_acquire.join(&atomic.sync_vector);
        }
        Ok(())
    }

    /// Update the memory cell data-race tracking for atomic
    /// store release semantics.
    fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.clone_from(&clocks.clock);
        Ok(())
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics.
    fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;

        // The handling of release sequences was changed in C++20 and so
        // the code here is different to the paper since now all relaxed
        // stores block release sequences. The exception for same-thread
        // relaxed stores has been removed.
        let atomic = self.atomic_mut();
        atomic.sync_vector.clone_from(&clocks.fence_release);
        Ok(())
    }

    /// Update the memory cell data-race tracking for an atomic
    /// read-modify-write with release semantics.
    fn rmw_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.clock);
        Ok(())
    }

    /// Update the memory cell data-race tracking for an atomic
    /// read-modify-write with relaxed semantics.
    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
        self.atomic_write_detect(clocks, index)?;
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.fence_release);
        Ok(())
    }

    /// Detect data-races with an atomic read, caused by a non-atomic write that does
    /// not happen-before the atomic read.
    fn atomic_read_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] {
            let atomic = self.atomic_mut();
            atomic.read_vector.set_at_index(&clocks.clock, index);
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    /// Detect data-races with an atomic write, either with a non-atomic read or with
    /// a non-atomic write.
    fn atomic_write_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
            let atomic = self.atomic_mut();
            atomic.write_vector.set_at_index(&clocks.clock, index);
            Ok(())
        } else {
            Err(DataRace)
        }
    }

    /// Detect races for non-atomic read operations at the current memory cell;
    /// returns an error if a data-race is detected.
    fn read_race_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
    ) -> Result<(), DataRace> {
        log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= clocks.clock
            } else {
                true
            };
            if race_free {
                self.read.set_at_index(&clocks.clock, index);
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }

    /// Detect races for non-atomic write operations at the current memory cell;
    /// returns an error if a data-race is detected.
    fn write_race_detect(
        &mut self,
        clocks: &ThreadClockSet,
        index: VectorIdx,
        write_type: WriteType,
    ) -> Result<(), DataRace> {
        log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
            let race_free = if let Some(atomic) = self.atomic() {
                atomic.write_vector <= clocks.clock && atomic.read_vector <= clocks.clock
            } else {
                true
            };
            if race_free {
                self.write = clocks.clock[index];
                self.write_index = index;
                self.write_type = write_type;
                self.read.set_zero_vector();
                Ok(())
            } else {
                Err(DataRace)
            }
        } else {
            Err(DataRace)
        }
    }
}
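
// Why the `rmw_*` functions *join* the sync vector while the `store_*` functions
// overwrite it: under C++20, release sequences are continued by read-modify-writes
// of any ordering but broken by plain stores. A sketch of the user-visible
// behaviour this models (hypothetical program; thread C runs after A and B):
//
//     static mut DATA: u32 = 0;
//     static FLAG: AtomicU32 = AtomicU32::new(0);
//     // Thread A:
//     unsafe { DATA = 42 };                 // non-atomic write
//     FLAG.store(1, Ordering::Release);     // store_release: sync_vector := A's clock
//     // Thread B:
//     FLAG.fetch_add(1, Ordering::Relaxed); // rmw_relaxed: joins, keeping A's clock
//     // Thread C:
//     if FLAG.load(Ordering::Acquire) == 2 {
//         let _ = unsafe { DATA };          // ordered after A's write: no race
//     }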

/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
    /// Temporarily allow data-races to occur. This should only be used in
    /// one of these cases:
    /// - One of the appropriate `validate_atomic` functions will be called
    ///   later to treat a memory access as atomic.
    /// - The memory being accessed should be treated as internal state, that
    ///   cannot be accessed by the interpreted program.
    /// - Execution of the interpreted program has halted.
    fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(true);
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    }

    /// Same as `allow_data_races_ref`; this temporarily disables any data-race detection and
    /// so should only be used for atomic operations or internal state that the program cannot
    /// access.
    fn allow_data_races_mut<R>(
        &mut self,
        op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
    ) -> R {
        let this = self.eval_context_mut();
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(true);
        }
        let result = op(this);
        if let Some(data_race) = &this.machine.data_race {
            data_race.ongoing_action_data_race_free.set(false);
        }
        result
    }

    /// Atomic variant of `read_scalar_at_offset`.
    fn read_scalar_at_offset_atomic(
        &self,
        op: &OpTy<'tcx, Tag>,
        offset: u64,
        layout: TyAndLayout<'tcx>,
        atomic: AtomicReadOp,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
        this.read_scalar_atomic(&value_place, atomic)
    }

    /// Atomic variant of `write_scalar_at_offset`.
    fn write_scalar_at_offset_atomic(
        &mut self,
        op: &OpTy<'tcx, Tag>,
        offset: u64,
        value: impl Into<ScalarMaybeUninit<Tag>>,
        layout: TyAndLayout<'tcx>,
        atomic: AtomicWriteOp,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
        this.write_scalar_atomic(value.into(), &value_place, atomic)
    }

    /// Perform an atomic read operation at the memory location.
    fn read_scalar_atomic(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
        atomic: AtomicReadOp,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        // This will read from the last store in the modification order of this location. In case
        // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
        // This is fine with Stacked Borrows and race checks because they don't concern metadata on
        // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
        // Only metadata on the location itself is used.
        let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
        this.validate_overlapping_atomic(place)?;
        this.buffered_atomic_read(place, atomic, scalar, || {
            this.validate_atomic_load(place, atomic)
        })
    }

    /// Perform an atomic write operation at the memory location.
    fn write_scalar_atomic(
        &mut self,
        val: ScalarMaybeUninit<Tag>,
        dest: &MPlaceTy<'tcx, Tag>,
        atomic: AtomicWriteOp,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_overlapping_atomic(dest)?;
        this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
        this.validate_atomic_store(dest, atomic)?;
        // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
        // side effects from a read the program did not perform. So we have to initialise
        // the store buffer with the value currently being written.
        // ONCE this is fixed please remove the hack in buffered_atomic_write() in weak_memory.rs
        // https://github.com/rust-lang/miri/issues/2164
        this.buffered_atomic_write(val, dest, atomic, val)
    }
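
    // How a user-level atomic store reaches this point (hypothetical sketch; the
    // actual dispatch lives in the intrinsic shims): `x.store(1, Ordering::Release)`
    // becomes roughly
    //
    //     this.write_scalar_atomic(Scalar::from_u32(1).into(), &place, AtomicWriteOp::Release)?;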

    /// Perform an atomic operation on a memory location.
    fn atomic_op_immediate(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        rhs: &ImmTy<'tcx, Tag>,
        op: mir::BinOp,
        neg: bool,
        atomic: AtomicRwOp,
    ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
        let this = self.eval_context_mut();

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;

        // Atomics wrap around on overflow.
        let val = this.binary_op(op, &old, rhs)?;
        let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
        this.allow_data_races_mut(|this| this.write_immediate(*val, &(*place).into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(
            val.to_scalar_or_uninit(),
            place,
            atomic,
            old.to_scalar_or_uninit(),
        )?;
        Ok(old)
    }

    /// Perform an atomic exchange with a memory place and a new
    /// scalar value; the old value is returned.
    fn atomic_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        new: ScalarMaybeUninit<Tag>,
        atomic: AtomicRwOp,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_mut();

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
        this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(new, place, atomic, old)?;
        Ok(old)
    }

    /// Perform a conditional atomic exchange with a memory place and a new
    /// scalar value; the old value is returned.
    fn atomic_min_max_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        rhs: ImmTy<'tcx, Tag>,
        min: bool,
        atomic: AtomicRwOp,
    ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
        let this = self.eval_context_mut();

        this.validate_overlapping_atomic(place)?;
        let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
        let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar()?.to_bool()?;

        let new_val = if min {
            if lt { &old } else { &rhs }
        } else {
            if lt { &rhs } else { &old }
        };

        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &(*place).into()))?;

        this.validate_atomic_rmw(place, atomic)?;

        this.buffered_atomic_rmw(
            new_val.to_scalar_or_uninit(),
            place,
            atomic,
            old.to_scalar_or_uninit(),
        )?;

        // Return the old value.
        Ok(old)
    }

    /// Perform an atomic compare and exchange at a given memory location.
    /// On success an atomic RMW operation is performed and on failure
    /// only an atomic read occurs. If `can_fail_spuriously` is true,
    /// then we treat it as a "compare_exchange_weak" operation, and
    /// some portion of the time fail even when the values are actually
    /// equal.
    fn atomic_compare_exchange_scalar(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        expect_old: &ImmTy<'tcx, Tag>,
        new: ScalarMaybeUninit<Tag>,
        success: AtomicRwOp,
        fail: AtomicReadOp,
        can_fail_spuriously: bool,
    ) -> InterpResult<'tcx, Immediate<Tag>> {
        use rand::Rng as _;
        let this = self.eval_context_mut();

        this.validate_overlapping_atomic(place)?;
        // Failure ordering cannot be stronger than success ordering, therefore first attempt
        // to read with the failure ordering and if successful then try again with the success
        // read ordering and write in the success case.
        // Read as immediate for the sake of `binary_op()`.
        let old = this.allow_data_races_mut(|this| this.read_immediate(&(place.into())))?;
        // `binary_op` will bail if either of them is not a scalar.
        let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
        // If the operation would succeed, but is "weak", fail some portion
        // of the time, based on `success_rate`.
        let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
        let cmpxchg_success = eq.to_scalar()?.to_bool()?
            && if can_fail_spuriously {
                this.machine.rng.get_mut().gen_bool(success_rate)
            } else {
                true
            };
        let res = Immediate::ScalarPair(
            old.to_scalar_or_uninit(),
            Scalar::from_bool(cmpxchg_success).into(),
        );

        // Update ptr depending on comparison.
        // If successful, perform a full rw-atomic validation;
        // otherwise treat this as an atomic load with the fail ordering.
        if cmpxchg_success {
            this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
            this.validate_atomic_rmw(place, success)?;
            this.buffered_atomic_rmw(new, place, success, old.to_scalar_or_uninit())?;
        } else {
            this.validate_atomic_load(place, fail)?;
            // A failed compare exchange is equivalent to a load, reading from the latest store
            // in the modification order.
            // Since `old` is only a value and not the store element, we need to separately
            // find it in our store buffer and perform load_impl on it.
            this.perform_read_on_buffered_latest(place, fail, old.to_scalar_or_uninit())?;
        }

        // Return the old value.
        Ok(res)
    }
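
    // User-level pattern the spurious failure models (hypothetical sketch):
    // a `compare_exchange_weak` loop has to retry on failure anyway, so
    // failing a would-be-successful weak CAS with probability
    // `cmpxchg_weak_failure_rate` surfaces bugs in non-looping callers:
    //
    //     let mut cur = flag.load(Ordering::Relaxed);
    //     while let Err(actual) = flag.compare_exchange_weak(
    //         cur, cur + 1, Ordering::AcqRel, Ordering::Relaxed,
    //     ) {
    //         cur = actual;
    //     }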

    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_load(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
        atomic: AtomicReadOp,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(
            place,
            atomic,
            "Atomic Load",
            move |memory, clocks, index, atomic| {
                if atomic == AtomicReadOp::Relaxed {
                    memory.load_relaxed(&mut *clocks, index)
                } else {
                    memory.load_acquire(&mut *clocks, index)
                }
            },
        )
    }

    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_store(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        atomic: AtomicWriteOp,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(
            place,
            atomic,
            "Atomic Store",
            move |memory, clocks, index, atomic| {
                if atomic == AtomicWriteOp::Relaxed {
                    memory.store_relaxed(clocks, index)
                } else {
                    memory.store_release(clocks, index)
                }
            },
        )
    }

    /// Update the data-race detector for an atomic read-modify-write occurring
    /// at the associated memory place and on the current thread.
    fn validate_atomic_rmw(
        &mut self,
        place: &MPlaceTy<'tcx, Tag>,
        atomic: AtomicRwOp,
    ) -> InterpResult<'tcx> {
        use AtomicRwOp::*;
        let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
        let release = matches!(atomic, Release | AcqRel | SeqCst);
        let this = self.eval_context_mut();
        this.validate_overlapping_atomic(place)?;
        this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
            if acquire {
                memory.load_acquire(clocks, index)?;
            } else {
                memory.load_relaxed(clocks, index)?;
            }
            if release {
                memory.rmw_release(clocks, index)
            } else {
                memory.rmw_relaxed(clocks, index)
            }
        })
    }

    /// Update the data-race detector for an atomic fence on the current thread.
    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if let Some(data_race) = &mut this.machine.data_race {
            data_race.maybe_perform_sync_operation(|index, mut clocks| {
                log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);

                // Apply data-race detection for the current fences;
                // this treats AcqRel and SeqCst as the same as an acquire
                // and release fence applied in the same timestamp.
                if atomic != AtomicFenceOp::Release {
                    // Either Acquire | AcqRel | SeqCst
                    clocks.apply_acquire_fence();
                }
                if atomic != AtomicFenceOp::Acquire {
                    // Either Release | AcqRel | SeqCst
                    clocks.apply_release_fence();
                }
                if atomic == AtomicFenceOp::SeqCst {
                    data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
                    clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
                    clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
                }

                // Increment timestamp in case of release semantics.
                Ok(atomic != AtomicFenceOp::Acquire)
            })
        } else {
            Ok(())
        }
    }
}
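
// The fence hook above is what gives fence-based synchronization its
// happens-before edges without false positives. A user-level pattern it
// validates (hypothetical sketch):
//
//     // Thread 1:
//     unsafe { DATA = 1 };                  // non-atomic write
//     fence(Ordering::Release);             // apply_release_fence()
//     READY.store(1, Ordering::Relaxed);    // publishes the fence_release clock
//     // Thread 2:
//     if READY.load(Ordering::Relaxed) == 1 {
//         fence(Ordering::Acquire);         // apply_acquire_fence()
//         let _ = unsafe { DATA };          // no race reported
//     }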

/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    /// Assigning each byte a MemoryCellClocks.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
}

impl VClockAlloc {
    /// Create a new data-race detector for newly allocated memory.
    pub fn new_allocation(
        global: &GlobalState,
        len: Size,
        kind: MemoryKind<MiriMemoryKind>,
    ) -> VClockAlloc {
        let (alloc_timestamp, alloc_index) = match kind {
            // User allocated and stack memory should track allocation.
            MemoryKind::Machine(
                MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap,
            )
            | MemoryKind::Stack => {
                let (alloc_index, clocks) = global.current_thread_state();
                let alloc_timestamp = clocks.clock[alloc_index];
                (alloc_timestamp, alloc_index)
            }
            // Other global memory should trace races but be allocated at the 0 timestamp.
            MemoryKind::Machine(
                MiriMemoryKind::Global
                | MiriMemoryKind::Machine
                | MiriMemoryKind::Runtime
                | MiriMemoryKind::ExternStatic
                | MiriMemoryKind::Tls,
            )
            | MemoryKind::CallerLocation => (0, VectorIdx::MAX_INDEX),
        };
        VClockAlloc {
            alloc_ranges: RefCell::new(RangeMap::new(
                len,
                MemoryCellClocks::new(alloc_timestamp, alloc_index),
            )),
        }
    }

    // Find an index, if one exists, where the value
    // in `l` is greater than the value in `r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
        log::trace!("Find index where not {:?} <= {:?}", l, r);
        let l_slice = l.as_slice();
        let r_slice = r.as_slice();
        l_slice
            .iter()
            .zip(r_slice.iter())
            .enumerate()
            .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
            .or_else(|| {
                if l_slice.len() > r_slice.len() {
                    // By invariant, if l_slice is longer
                    // then one element must be larger.
                    // This just validates that this is true
                    // and reports earlier elements first.
                    let l_remainder_slice = &l_slice[r_slice.len()..];
                    let idx = l_remainder_slice
                        .iter()
                        .enumerate()
                        .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
                        .expect("Invalid VClock Invariant");
                    Some(idx + r_slice.len())
                } else {
                    None
                }
            })
            .map(VectorIdx::new)
    }

    /// Report a data-race found in the program.
    /// This finds the two racing threads and the type
    /// of data-race that occurred. This will also
    /// return info about the memory location the data-race
    /// occurred in.
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &GlobalState,
        range: &MemoryCellClocks,
        action: &str,
        is_atomic: bool,
        ptr_dbg: Pointer<AllocId>,
    ) -> InterpResult<'tcx> {
        let (current_index, current_clocks) = global.current_thread_state();
        let write_clock;
        let (other_action, other_thread, other_clock) = if range.write
            > current_clocks.clock[range.write_index]
        {
            // Convert the write action into the vector clock it
            // represents for diagnostic purposes.
            write_clock = VClock::new_with_index(range.write_index, range.write);
            (range.write_type.get_descriptor(), range.write_index, &write_clock)
        } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
            ("Read", idx, &range.read)
        } else if !is_atomic {
            if let Some(atomic) = range.atomic() {
                if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                {
                    ("Atomic Store", idx, &atomic.write_vector)
                } else if let Some(idx) =
                    Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
                {
                    ("Atomic Load", idx, &atomic.read_vector)
                } else {
                    unreachable!(
                        "Failed to report data-race for non-atomic operation: no race found"
                    )
                }
            } else {
                unreachable!(
                    "Failed to report data-race for non-atomic operation: no atomic component"
                )
            }
        } else {
            unreachable!("Failed to report data-race for atomic operation")
        };

        // Load elaborated thread information about the racing thread actions.
        let current_thread_info = global.print_thread_metadata(current_index);
        let other_thread_info = global.print_thread_metadata(other_thread);

        // Throw the data-race detection error.
        throw_ub_format!(
            "Data race detected between {} on {} and {} on {} at {:?} (current vector clock = {:?}, conflicting timestamp = {:?})",
            action,
            current_thread_info,
            other_action,
            other_thread_info,
            ptr_dbg,
            current_clocks.clock,
            other_clock
        )
    }

    /// Detect racing atomic reads and writes (not data races)
    /// on every byte of the current access range.
    pub(super) fn race_free_with_atomic(&self, range: AllocRange, global: &GlobalState) -> bool {
        if global.race_detecting() {
            let (_, clocks) = global.current_thread_state();
            let alloc_ranges = self.alloc_ranges.borrow();
            for (_, range) in alloc_ranges.iter(range.start, range.size) {
                if !range.race_free_with_atomic(&clocks) {
                    return false;
                }
            }
        }
        true
    }

    /// Detect data-races for an unsynchronized read operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation for which data-race detection is handled separately, for example
    /// atomic read operations.
    pub fn read<'tcx>(
        &self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &GlobalState,
    ) -> InterpResult<'tcx> {
        if global.race_detecting() {
            let (index, clocks) = global.current_thread_state();
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
            for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
                    // Report data-race.
                    return Self::report_data_race(
                        global,
                        range,
                        "Read",
                        false,
                        Pointer::new(alloc_id, offset),
                    );
                }
            }
        }
        Ok(())
    }

    // Shared code for detecting data-races on unique access to a section of memory.
    fn unique_access<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        write_type: WriteType,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
        if global.race_detecting() {
            let (index, clocks) = global.current_thread_state();
            for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                    // Report data-race.
                    return Self::report_data_race(
                        global,
                        range,
                        write_type.get_descriptor(),
                        false,
                        Pointer::new(alloc_id, offset),
                    );
                }
            }
        }
        Ok(())
    }

    /// Detect data-races for an unsynchronized write operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation for which data-race detection is handled separately.
    pub fn write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
        self.unique_access(alloc_id, range, WriteType::Write, global)
    }

    /// Detect data-races for an unsynchronized deallocate operation. It will not perform
    /// data-race detection if `race_detecting()` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation for which data-race detection is handled separately.
    pub fn deallocate<'tcx>(
        &mut self,
        alloc_id: AllocId,
        range: AllocRange,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
        self.unique_access(alloc_id, range, WriteType::Deallocate, global)
    }
}
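
// Because deallocation is treated as a write (see `WriteType::Deallocate`),
// freeing memory that another thread may still access without synchronization
// is reported as a race. A racy program this catches (hypothetical sketch):
//
//     let b = Box::new(0u32);
//     let p = &*b as *const u32 as usize;
//     let t = std::thread::spawn(move || unsafe { *(p as *const u32) });
//     drop(b); // Deallocate races with the read in `t`
//     t.join().unwrap();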

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
    /// Generic atomic operation implementation.
    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
        atomic: A,
        description: &str,
        mut op: impl FnMut(
            &mut MemoryCellClocks,
            &mut ThreadClockSet,
            VectorIdx,
            A,
        ) -> Result<(), DataRace>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.machine.data_race {
            if data_race.race_detecting() {
                let size = place.layout.size;
                let (alloc_id, base_offset, _tag) = this.ptr_get_alloc_id(place.ptr)?;
                // Load and log the atomic operation.
                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
                log::trace!(
                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
                    description,
                    &atomic,
                    place.ptr,
                    size.bytes()
                );

                // Perform the atomic operation.
                data_race.maybe_perform_sync_operation(|index, mut clocks| {
                    for (offset, range) in
                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                    {
                        if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
                            mem::drop(clocks);
                            return VClockAlloc::report_data_race(
                                data_race,
                                range,
                                description,
                                true,
                                Pointer::new(alloc_id, offset),
                            );
                        }
                    }

                    // This conservatively assumes all operations have release semantics.
                    Ok(true)
                })?;

                // Log changes to atomic memory.
                if log::log_enabled!(log::Level::Trace) {
                    for (_offset, range) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                    {
                        log::trace!(
                            "Updated atomic memory({:?}, size={}) to {:#?}",
                            place.ptr,
                            size.bytes(),
                            range.atomic_ops
                        );
                    }
                }
            }
        }
        Ok(())
    }
}

/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector index currently in use by the
    /// thread; this is set to None
    /// after the vector index has been re-used
    /// and hence the value will never need to be
    /// read during data-race reporting.
    vector_index: Option<VectorIdx>,

    /// The name of the thread, updated for better
    /// diagnostics when reporting detected data
    /// races.
    thread_name: Option<Box<str>>,

    /// Thread termination vector clock; this
    /// is set on thread termination and is used
    /// for joining on threads since the vector_index
    /// may be re-used when the join operation occurs.
    termination_vector_clock: Option<VClock>,
}

/// Global data-race detection state; contains the currently
/// executing thread as well as the vector clocks associated
/// with each of the threads.
// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set to true once the first additional
    /// thread has launched, due to the dependency
    /// between before and after a thread launch.
    /// Any data-races must be recorded after this,
    /// so concurrent execution can ignore recording
    /// any data-races.
    multi_threaded: Cell<bool>,

    /// A flag to mark that we are currently performing
    /// a data-race-free action (such as an atomic access)
    /// in order to suppress the race detector.
    ongoing_action_data_race_free: Cell<bool>,

    /// Mapping of a vector index to a known set of thread
    /// clocks; this is not directly a mapping from a thread id
    /// since it may refer to multiple threads.
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// Mapping of a given vector index to the current thread
    /// that the execution is representing; this may change
    /// if a vector index is re-assigned to a new thread.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// The mapping of a given thread to associated thread metadata.
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// The current vector index being executed.
    current_index: Cell<VectorIdx>,

    /// Potential vector indices that could be re-used on thread creation;
    /// values are inserted here after the thread has terminated and
    /// been joined with, and hence may potentially become free
    /// for use as the index for a new thread.
    /// Elements in this set may still require the vector index to
    /// report data-races, and can only be re-used after all
    /// active vector clocks catch up with the thread's timestamp.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// Counts the number of threads that are currently active;
    /// if the number of active threads reduces to 1 and then
    /// a join operation occurs with the remaining main thread,
    /// then multi-threaded execution may be disabled.
    active_thread_count: Cell<usize>,

    /// This contains threads that have terminated, but not yet joined,
    /// and so cannot become re-use candidates until a join operation
    /// occurs.
    /// The associated vector index will be moved into re-use candidates
    /// after the join operation occurs.
    terminated_threads: RefCell<FxHashMap<ThreadId, VectorIdx>>,

    /// The timestamp of the last SC fence performed by each thread.
    last_sc_fence: RefCell<VClock>,

    /// The timestamp of the last SC write performed by each thread.
    last_sc_write: RefCell<VClock>,
}

impl GlobalState {
    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new() -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            current_index: Cell::new(VectorIdx::new(0)),
            active_thread_count: Cell::new(1),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            terminated_threads: RefCell::new(FxHashMap::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write: RefCell::new(VClock::default()),
        };

        // Setup the main-thread since it is not explicitly created:
        // uses vector index and thread-id 0, also the rust runtime gives
        // the main-thread a name of "main".
        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::new(0));
        global_state.thread_info.get_mut().push(ThreadExtraState {
            vector_index: Some(index),
            thread_name: Some("main".to_string().into_boxed_str()),
            termination_vector_clock: None,
        });

        global_state
    }

    // We perform data race detection when there is more than 1 active thread
    // and we have not temporarily disabled race detection to perform something
    // data race free.
    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }

    // Try to find vector index values that can potentially be re-used
    // by a new thread instead of a new vector index being created.
    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        let vector_info = self.vector_info.borrow();
        let terminated_threads = self.terminated_threads.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                // The thread happens before the clock, and hence cannot report
                // a data-race with the candidate index.
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                // The vector represents a thread that has terminated and hence cannot
                // report a data-race with the candidate index.
                let thread_id = vector_info[clock_idx];
                let vector_terminated =
                    reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);

                // The vector index cannot report a race with the candidate index
                // and hence allows the candidate index to be re-used.
                no_data_race || vector_terminated
            }) {
                // All vector clocks for each vector index are at least equal to
                // the target timestamp, and the thread is known to have
                // terminated, therefore this vector clock index cannot
                // report any more data-races.
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }
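
    // Worked example (hypothetical numbers): a candidate index c terminated at
    // timestamp 5, so `target_timestamp == 5`. If every other vector either shows
    // `clock[c] >= 5` (it happens-after the join of c) or itself belongs to a
    // terminated thread, then no future access can discover a new race involving
    // index c, and the index is safe to hand to a freshly created thread.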

    // Hook for thread creation: enables multi-threaded execution and marks
    // the current thread timestamp as happening-before the created thread.
    pub fn thread_created(&mut self, thread: ThreadId) {
        let current_index = self.current_index();

        // Increment the number of active threads.
        let active_threads = self.active_thread_count.get();
        self.active_thread_count.set(active_threads + 1);

        // Enable multi-threaded execution: there are now two threads,
        // so data-races are now possible.
        self.multi_threaded.set(true);

        // Load and setup the associated thread metadata.
        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        // Assign a vector index for the thread, attempting to re-use an old
        // vector index that can no longer report any data-races if possible.
        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            // Now re-configure the re-use candidate, increment the clock
            // for the new sync use of the vector.
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index);

            // Locate the old thread the vector was associated with and update
            // it to represent the new thread instead.
            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            // Mark the thread the vector index was associated with as no longer
            // representing a thread index.
            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            // No vector re-use candidates available, instead create
            // a new vector index.
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        // Mark the chosen vector index as in use by the thread.
        thread_info[thread].vector_index = Some(created_index);

        // Create a thread clock set if applicable.
        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        // Now load the two clocks and configure the initial state.
        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        // Join the created with current, since the current thread's
        // previous actions happen-before the created thread.
        created.join_with(current);

        // Advance both threads after the synchronized operation.
        // Both operations are considered to have release semantics.
        current.increment_clock(current_index);
        created.increment_clock(created_index);
    }

    /// Hook on a thread join to update the implicit happens-before relation
    /// between the joined thread and the current thread.
    pub fn thread_joined(&mut self, current_thread: ThreadId, join_thread: ThreadId) {
        let clocks_vec = self.vector_clocks.get_mut();
        let thread_info = self.thread_info.get_mut();

        // Load the vector clock of the current thread.
        let current_index = thread_info[current_thread]
            .vector_index
            .expect("Performed thread join on thread with no assigned vector");
        let current = &mut clocks_vec[current_index];

        // Load the associated vector clock for the terminated thread.
        let join_clock = thread_info[join_thread]
            .termination_vector_clock
            .as_ref()
            .expect("Joined with thread but thread has not terminated");

        // The join thread happens-before the current thread,
        // so update the current vector clock.
        // This is not a release operation, so the clock is not incremented.
        current.clock.join(join_clock);

        // Check the number of active threads; if the value is 1,
        // then test for potentially disabling multi-threaded execution.
        let active_threads = self.active_thread_count.get();
        if active_threads == 1 {
            // May potentially be able to disable multi-threaded execution.
            let current_clock = &clocks_vec[current_index];
            if clocks_vec
                .iter_enumerated()
                .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
            {
                // All thread terminations happen-before the current clock,
                // therefore no data-races can be reported until a new thread
                // is created, so disable multi-threaded execution.
                self.multi_threaded.set(false);
            }
        }

        // If the thread is marked as terminated but not joined,
        // then move the thread to the re-use set.
        let termination = self.terminated_threads.get_mut();
        if let Some(index) = termination.remove(&join_thread) {
            let reuse = self.reuse_candidates.get_mut();
            reuse.insert(index);
        }
    }

    /// On thread termination, the vector clock may be re-used
    /// in the future once all remaining thread-clocks catch
    /// up with the time index of the terminated thread.
    /// This assigns thread termination with a unique index
    /// which will be used to join the thread.
    /// This should be called strictly before any calls to
    /// `thread_joined`.
    pub fn thread_terminated(&mut self) {
        let current_index = self.current_index();

        // Increment the clock to a unique termination timestamp.
        let vector_clocks = self.vector_clocks.get_mut();
        let current_clocks = &mut vector_clocks[current_index];
        current_clocks.increment_clock(current_index);

        // Load the current thread id for the executing vector.
        let vector_info = self.vector_info.get_mut();
        let current_thread = vector_info[current_index];

        // Load the current thread metadata, and move to a terminated
        // vector state. Setting up the vector clock all join operations
        // will use.
        let thread_info = self.thread_info.get_mut();
        let current = &mut thread_info[current_thread];
        current.termination_vector_clock = Some(current_clocks.clock.clone());

        // Add this thread as a candidate for re-use after a thread join
        // occurs.
        let termination = self.terminated_threads.get_mut();
        termination.insert(current_thread, current_index);

        // Reduce the number of active threads, now that a thread has
        // terminated.
        let mut active_threads = self.active_thread_count.get();
        active_threads -= 1;
        self.active_thread_count.set(active_threads);
    }

    /// Hook for updating the local tracker of the currently
    /// enabled thread; should always be updated whenever
    /// `active_thread` in thread.rs is updated.
    pub fn thread_set_active(&self, thread: ThreadId) {
        let thread_info = self.thread_info.borrow();
        let vector_idx = thread_info[thread]
            .vector_index
            .expect("Setting thread active with no assigned vector");
        self.current_index.set(vector_idx);
    }

    /// Hook for updating the local tracker of the thread's name;
    /// this should always mirror the local value in thread.rs.
    /// The thread name is used for improved diagnostics
    /// during a data-race.
    pub fn thread_set_name(&mut self, thread: ThreadId, name: String) {
        let name = name.into_boxed_str();
        let thread_info = self.thread_info.get_mut();
        thread_info[thread].thread_name = Some(name);
    }

    /// Attempt to perform a synchronized operation; this
    /// will perform no operation if multi-threading is
    /// not currently enabled.
    /// Otherwise it will increment the clock for the current
    /// vector before and after the operation for data-race
    /// detection between any happens-before edges the
    /// operation may create.
    fn maybe_perform_sync_operation<'tcx>(
        &self,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.current_thread_state_mut();
            if op(index, clocks)? {
                let (_, mut clocks) = self.current_thread_state_mut();
                clocks.increment_clock(index);
            }
        }
        Ok(())
    }

    /// Internal utility to identify a thread stored internally;
    /// returns the id and the name for better diagnostics.
    fn print_thread_metadata(&self, vector: VectorIdx) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = &self.thread_info.borrow()[thread].thread_name;
        if let Some(name) = thread_name {
            let name: &str = name;
            format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
        } else {
            format!("Thread(id = {:?})", thread.to_u32())
        }
    }

    /// Acquire a lock, express that the previous call of
    /// `validate_lock_release` must happen before this.
    /// As this is an acquire operation, the thread timestamp is not
    /// incremented.
    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
        let (_, mut clocks) = self.load_thread_state_mut(thread);
        clocks.clock.join(lock);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    /// For normal locks this should be equivalent to `validate_lock_release_shared`
    /// since an acquire operation should have occurred before; however,
    /// for futex & condvar operations this is not the case and this
    /// operation must be used.
    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.clone_from(&clocks.clock);
        clocks.increment_clock(index);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    /// For normal locks this should be equivalent to `validate_lock_release`.
    /// This function only exists for joining over the set of concurrent readers
    /// in a read-write lock and should not be used for anything else.
    pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.join(&clocks.clock);
        clocks.increment_clock(index);
    }
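
    // Hypothetical usage from a mutex shim (names assumed): unlock publishes the
    // holder's clock into the lock's clock, and a later lock acquires it,
    // producing the expected unlock -> lock happens-before edge:
    //
    //     // On `pthread_mutex_unlock` by `holder`:
    //     data_race.validate_lock_release(&mut mutex_clock, holder);
    //     // On a subsequent `pthread_mutex_lock` by `acquirer`:
    //     data_race.validate_lock_acquire(&mutex_clock, acquirer);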

    /// Load the vector index used by the given thread as well as the set of vector clocks
    /// used by the thread.
    fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_info.borrow()[thread]
            .vector_index
            .expect("Loading thread state for thread with no assigned vector");
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector.
    pub(super) fn current_thread_state(&self) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.current_index();
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector, mutably for modification.
    pub(super) fn current_thread_state_mut(&self) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.current_index();
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Return the current thread; should be the same
    /// as the data-race active thread.
    fn current_index(&self) -> VectorIdx {
        self.current_index.get()
    }

    // SC ATOMIC STORE rule in the paper.
    pub(super) fn sc_write(&self) {
        let (index, clocks) = self.current_thread_state();
        self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
    }

    // SC ATOMIC READ rule in the paper.
    pub(super) fn sc_read(&self) {
        let (.., mut clocks) = self.current_thread_state_mut();
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
    }
}
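
// Classic store-buffering litmus test that the two rules above help order
// (hypothetical sketch): with SeqCst fences, `r1 == 0 && r2 == 0` is forbidden,
// and the `write_seqcst`/`read_seqcst` clocks let the detector account for this
// without reporting false positives:
//
//     static X: AtomicUsize = AtomicUsize::new(0);
//     static Y: AtomicUsize = AtomicUsize::new(0);
//     // Thread 1:                  // Thread 2:
//     X.store(1, Relaxed);          Y.store(1, Relaxed);
//     fence(SeqCst);                fence(SeqCst);
//     let r1 = Y.load(Relaxed);     let r2 = X.load(Relaxed);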