//! Implementation of a data-race detector using Lamport timestamps / vector clocks,
//! based on "Dynamic Race Detection for C++11":
//! https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf
//! This algorithm does not report false positives when fences are used, and gives better
//! accuracy in the presence of read-modify-write operations.
//! The implementation contains modifications to correctly model the changes to the C++20 memory model
//! regarding the weakening of release sequences: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html.
//! Relaxed stores now unconditionally block all currently active release sequences, so per-thread tracking of release
//! sequences is no longer needed.
//! The implementation also models races with memory allocation and deallocation by treating allocation and
//! deallocation as a type of write internally for the purpose of detecting data-races.
//! This does not explore weak memory orders and so can still miss data-races,
//! but should not report false positives.
//! Data-race definition from (https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races):
//! a data race occurs between two memory accesses if they are on different threads, at least one operation
//! is non-atomic, at least one operation is a write, and neither access happens-before the other. Read the link
//! for the full definition.
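//!
//! As an illustration, the core happens-before test can be sketched with plain integer
//! vector clocks (a simplified, hypothetical stand-in for the `VClock` type used below;
//! the fixed clock size of 2 is an assumption for the example):
//! ```rust
//! // `a` happens-before `b` if every component of `a` is <= the
//! // corresponding component of `b`.
//! fn happens_before(a: &[u32; 2], b: &[u32; 2]) -> bool {
//!     a.iter().zip(b).all(|(x, y)| x <= y)
//! }
//!
//! // One of the two accesses is a write: they race unless the
//! // accesses are ordered one way or the other.
//! fn is_data_race(write: &[u32; 2], read: &[u32; 2]) -> bool {
//!     !happens_before(write, read) && !happens_before(read, write)
//! }
//!
//! fn main() {
//!     // Thread 0 wrote at time 2, but thread 1's view of thread 0
//!     // is only 1 when it reads: the accesses are unordered => race.
//!     assert!(is_data_race(&[2, 0], &[1, 3]));
//!     // After synchronizing (e.g. a join), thread 1's clock covers
//!     // the write, so the same pair of accesses is race-free.
//!     assert!(!is_data_race(&[2, 0], &[2, 3]));
//! }
//! ```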
//! This implementation re-uses vector indices for threads that are known to be unable to report data-races.
//! This is valid because an index is only re-used once all currently-active (not-terminated) threads have an internal
//! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
//! on are not considered. Since the thread's vector clock will only increase, and a data-race implies that
//! there is some index x where clock[x] > thread_clock, when this is true clock[candidate-idx] > thread_clock
//! can never hold and hence a data-race can never be reported in that vector index again.
//! This means that the thread-index can be safely re-used, starting on the next timestamp for the newly created
//! thread.
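//!
//! A minimal sketch of that re-use condition, with simplified types (plain `Vec<u32>`
//! clocks instead of `VClock`, and a hypothetical `can_reuse` helper):
//! ```rust
//! // Candidate index `c` may be recycled once every vector clock has
//! // caught up with the candidate thread's own final timestamp.
//! fn can_reuse(c: usize, clocks: &[Vec<u32>]) -> bool {
//!     let target = clocks[c][c];
//!     clocks.iter().all(|clock| clock[c] >= target)
//! }
//!
//! fn main() {
//!     // Index 1 terminated at time 4 and both clocks have seen it,
//!     // so no future access can race with its old accesses.
//!     let clocks = vec![vec![3, 4], vec![0, 4]];
//!     assert!(can_reuse(1, &clocks));
//!     // If some thread had only seen time 3, re-use would be unsound.
//!     let clocks = vec![vec![3, 3], vec![0, 4]];
//!     assert!(!can_reuse(1, &clocks));
//! }
//! ```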
//! The sequentially consistent ordering corresponds to the order in which the threads
//! are currently scheduled; this means that the data-race detector has no additional
//! logic for sequentially consistent accesses at the moment, since they are indistinguishable
//! from acquire/release operations. If weak memory orderings are explored then this
//! may need to change or be updated accordingly.
//! Per the C++ spec for the memory model, a sequentially consistent operation:
//! "A load operation with this memory order performs an acquire operation,
//! a store performs a release operation, and read-modify-write performs
//! both an acquire operation and a release operation, plus a single total
//! order exists in which all threads observe all modifications in the same
//! order (see Sequentially-consistent ordering below)"
//! So in the absence of weak memory effects a seq-cst load and a seq-cst store are identical
//! to an acquire load and a release store given the global sequentially consistent order.
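//!
//! For example, under this model the following message-passing idiom synchronizes
//! identically whether the `READY` accesses use `SeqCst` or the `Release`/`Acquire`
//! pair shown (a sketch using std atomics, not code from this module):
//! ```rust
//! use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
//! use std::thread;
//!
//! static DATA: AtomicU32 = AtomicU32::new(0);
//! static READY: AtomicBool = AtomicBool::new(false);
//!
//! fn main() {
//!     let t = thread::spawn(|| {
//!         DATA.store(42, Ordering::Relaxed);
//!         // A SeqCst store would be treated the same as this release store.
//!         READY.store(true, Ordering::Release);
//!     });
//!     // A SeqCst load would be treated the same as this acquire load.
//!     while !READY.load(Ordering::Acquire) {}
//!     assert_eq!(DATA.load(Ordering::Relaxed), 42);
//!     t.join().unwrap();
//! }
//! ```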
//! The timestamps used in the data-race detector assign each sequence of non-atomic operations
//! followed by a single atomic or concurrent operation a single timestamp.
//! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
//! This is because extra increment operations between the operations in the sequence are not
//! required for accurate reporting of data-races.
//! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
//! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
//! code, some atomic operations may increment the timestamp when not strictly necessary, but this has no effect
//! on the data-race detection code.
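//!
//! As a sketch of this timestamping, the following (hypothetical) event trace shows a
//! thread keeping one timestamp across a run of operations and only advancing it after
//! an operation with release semantics:
//! ```rust
//! fn main() {
//!     let mut timestamp = 1u32;
//!     let mut log = Vec::new();
//!     for event in ["write x", "read y", "write x", "release store"] {
//!         log.push((event, timestamp));
//!         if event == "release store" {
//!             // Only release operations need a fresh timestamp.
//!             timestamp += 1;
//!         }
//!     }
//!     // The whole sequence shares timestamp 1; later events get 2.
//!     assert!(log.iter().all(|&(_, t)| t == 1));
//!     assert_eq!(timestamp, 2);
//! }
//! ```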
//! Currently we have our own local copy of the currently active thread index and names; this is due
//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra
//! read, write and deallocate functions, and should be cleaned up in the future.
use std::{
    cell::{Cell, Ref, RefCell, RefMut},
    fmt::Debug,
    rc::Rc,
};

use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::{mir, ty::layout::TyAndLayout};
use rustc_target::abi::Size;

use crate::{
    ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
    OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
    VectorIdx, MemoryKind, MiriMemoryKind,
};
82 pub type AllocExtra = VClockAlloc;
83 pub type MemoryExtra = Rc<GlobalState>;
/// Valid atomic read-write operations; alias of atomic::Ordering (but not marked
/// non-exhaustive, unlike the std enum).
86 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
95 /// Valid atomic read operations, subset of atomic::Ordering.
96 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
97 pub enum AtomicReadOp {
103 /// Valid atomic write operations, subset of atomic::Ordering.
104 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
105 pub enum AtomicWriteOp {
111 /// Valid atomic fence operations, subset of atomic::Ordering.
112 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
113 pub enum AtomicFenceOp {
120 /// The current set of vector clocks describing the state
121 /// of a thread, contains the happens-before clock and
122 /// additional metadata to model atomic fence operations.
123 #[derive(Clone, Default, Debug)]
124 struct ThreadClockSet {
125 /// The increasing clock representing timestamps
126 /// that happen-before this thread.
129 /// The set of timestamps that will happen-before this
130 /// thread once it performs an acquire fence.
131 fence_acquire: VClock,
133 /// The last timestamp of happens-before relations that
134 /// have been released by this thread by a fence.
135 fence_release: VClock,
138 impl ThreadClockSet {
139 /// Apply the effects of a release fence to this
140 /// set of thread vector clocks.
142 fn apply_release_fence(&mut self) {
143 self.fence_release.clone_from(&self.clock);
/// Apply the effects of an acquire fence to this
/// set of thread vector clocks.
149 fn apply_acquire_fence(&mut self) {
150 self.clock.join(&self.fence_acquire);
/// Increment the happens-before clock at a
/// known index.
156 fn increment_clock(&mut self, index: VectorIdx) {
157 self.clock.increment_index(index);
/// Join the happens-before clock with that of
/// another thread, used to model thread join
/// operations.
163 fn join_with(&mut self, other: &ThreadClockSet) {
164 self.clock.join(&other.clock);
/// Error returned when a data-race is found;
/// should be elaborated upon.
170 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
/// Externally stored memory cell clocks;
/// boxed explicitly to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
177 #[derive(Clone, PartialEq, Eq, Default, Debug)]
178 struct AtomicMemoryCellClocks {
179 /// The clock-vector of the timestamp of the last atomic
180 /// read operation performed by each thread.
181 /// This detects potential data-races between atomic read
182 /// and non-atomic write operations.
185 /// The clock-vector of the timestamp of the last atomic
186 /// write operation performed by each thread.
187 /// This detects potential data-races between atomic write
188 /// and non-atomic read or write operations.
189 write_vector: VClock,
191 /// Synchronization vector for acquire-release semantics
192 /// contains the vector of timestamps that will
193 /// happen-before a thread if an acquire-load is
194 /// performed on the data.
/// Type of write operation: allocating memory,
/// non-atomic writes and deallocating memory
/// are all treated as writes for the purpose
/// of the data-race detector.
202 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
207 /// Standard unsynchronized write.
210 /// Deallocate memory.
211 /// Note that when memory is deallocated first, later non-atomic accesses
212 /// will be reported as use-after-free, not as data races.
213 /// (Same for `Allocate` above.)
217 fn get_descriptor(self) -> &'static str {
219 WriteType::Allocate => "Allocate",
220 WriteType::Write => "Write",
221 WriteType::Deallocate => "Deallocate",
226 /// Memory Cell vector clock metadata
227 /// for data-race detection.
228 #[derive(Clone, PartialEq, Eq, Debug)]
229 struct MemoryCellClocks {
/// The vector-clock timestamp of the last write,
/// corresponding to the writing thread's timestamp.
234 /// The identifier of the vector index, corresponding to a thread
235 /// that performed the last write operation.
236 write_index: VectorIdx,
238 /// The type of operation that the write index represents,
239 /// either newly allocated memory, a non-atomic write or
240 /// a deallocation of memory.
241 write_type: WriteType,
243 /// The vector-clock of the timestamp of the last read operation
244 /// performed by a thread since the last write operation occurred.
245 /// It is reset to zero on each write operation.
248 /// Atomic acquire & release sequence tracking clocks.
249 /// For non-atomic memory in the common case this
250 /// value is set to None.
251 atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
254 impl MemoryCellClocks {
255 /// Create a new set of clocks representing memory allocated
256 /// at a given vector timestamp and index.
257 fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
259 read: VClock::default(),
261 write_index: alloc_index,
262 write_type: WriteType::Allocate,
267 /// Load the internal atomic memory cells if they exist.
269 fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
270 match &self.atomic_ops {
271 Some(op) => Some(&*op),
276 /// Load or create the internal atomic memory metadata
277 /// if it does not exist.
279 fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
280 self.atomic_ops.get_or_insert_with(Default::default)
/// Update memory cell data-race tracking for atomic
/// load acquire semantics; this is a no-op if this memory was
/// not used previously as atomic memory.
288 clocks: &mut ThreadClockSet,
290 ) -> Result<(), DataRace> {
291 self.atomic_read_detect(clocks, index)?;
292 if let Some(atomic) = self.atomic() {
293 clocks.clock.join(&atomic.sync_vector);
/// Update memory cell data-race tracking for atomic
/// load relaxed semantics; this is a no-op if this memory was
/// not used previously as atomic memory.
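///
/// For intuition, this models the source-level pattern below (a sketch using std
/// atomics, not code from this module), where the relaxed load alone does not
/// synchronize and a later acquire fence completes the synchronization:
/// ```rust
/// use std::sync::atomic::{fence, AtomicBool, AtomicU32, Ordering};
/// use std::thread;
///
/// static DATA: AtomicU32 = AtomicU32::new(0);
/// static FLAG: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     let t = thread::spawn(|| {
///         DATA.store(1, Ordering::Relaxed);
///         FLAG.store(true, Ordering::Release);
///     });
///     // The relaxed load only records the release clock into
///     // `fence_acquire`; the acquire fence then joins it into
///     // the thread's main clock.
///     while !FLAG.load(Ordering::Relaxed) {}
///     fence(Ordering::Acquire);
///     assert_eq!(DATA.load(Ordering::Relaxed), 1);
///     t.join().unwrap();
/// }
/// ```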
303 clocks: &mut ThreadClockSet,
305 ) -> Result<(), DataRace> {
306 self.atomic_read_detect(clocks, index)?;
307 if let Some(atomic) = self.atomic() {
308 clocks.fence_acquire.join(&atomic.sync_vector);
313 /// Update the memory cell data-race tracking for atomic
314 /// store release semantics.
315 fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
316 self.atomic_write_detect(clocks, index)?;
317 let atomic = self.atomic_mut();
318 atomic.sync_vector.clone_from(&clocks.clock);
322 /// Update the memory cell data-race tracking for atomic
323 /// store relaxed semantics.
324 fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
325 self.atomic_write_detect(clocks, index)?;
327 // The handling of release sequences was changed in C++20 and so
328 // the code here is different to the paper since now all relaxed
329 // stores block release sequences. The exception for same-thread
330 // relaxed stores has been removed.
331 let atomic = self.atomic_mut();
332 atomic.sync_vector.clone_from(&clocks.fence_release);
336 /// Update the memory cell data-race tracking for atomic
337 /// store release semantics for RMW operations.
338 fn rmw_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
339 self.atomic_write_detect(clocks, index)?;
340 let atomic = self.atomic_mut();
341 atomic.sync_vector.join(&clocks.clock);
345 /// Update the memory cell data-race tracking for atomic
346 /// store relaxed semantics for RMW operations.
347 fn rmw_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
348 self.atomic_write_detect(clocks, index)?;
349 let atomic = self.atomic_mut();
350 atomic.sync_vector.join(&clocks.fence_release);
354 /// Detect data-races with an atomic read, caused by a non-atomic write that does
355 /// not happen-before the atomic-read.
356 fn atomic_read_detect(
358 clocks: &ThreadClockSet,
360 ) -> Result<(), DataRace> {
361 log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
362 if self.write <= clocks.clock[self.write_index] {
363 let atomic = self.atomic_mut();
364 atomic.read_vector.set_at_index(&clocks.clock, index);
371 /// Detect data-races with an atomic write, either with a non-atomic read or with
372 /// a non-atomic write.
373 fn atomic_write_detect(
375 clocks: &ThreadClockSet,
377 ) -> Result<(), DataRace> {
378 log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
379 if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
380 let atomic = self.atomic_mut();
381 atomic.write_vector.set_at_index(&clocks.clock, index);
/// Detect races for non-atomic read operations at the current memory cell;
/// returns a `DataRace` error if a data-race is detected.
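///
/// For example, the following deliberately racy program (a sketch, not code from
/// this module) is the kind of input this check rejects:
/// ```rust,no_run
/// use std::thread;
///
/// static mut X: u32 = 0;
///
/// fn main() {
///     // The write on the spawned thread and the read on the main
///     // thread are unordered: neither happens-before the other.
///     let t = thread::spawn(|| unsafe { X = 1 });
///     let _r = unsafe { X }; // <- reported as a Read/Write data-race
///     t.join().unwrap();
/// }
/// ```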
392 clocks: &ThreadClockSet,
394 ) -> Result<(), DataRace> {
395 log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
396 if self.write <= clocks.clock[self.write_index] {
397 let race_free = if let Some(atomic) = self.atomic() {
398 atomic.write_vector <= clocks.clock
403 self.read.set_at_index(&clocks.clock, index);
/// Detect races for non-atomic write operations at the current memory cell;
/// returns a `DataRace` error if a data-race is detected.
415 fn write_race_detect(
417 clocks: &ThreadClockSet,
419 write_type: WriteType,
420 ) -> Result<(), DataRace> {
421 log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
422 if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
423 let race_free = if let Some(atomic) = self.atomic() {
424 atomic.write_vector <= clocks.clock && atomic.read_vector <= clocks.clock
429 self.write = clocks.clock[index];
430 self.write_index = index;
431 self.write_type = write_type;
432 self.read.set_zero_vector();
443 /// Evaluation context extensions.
444 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
445 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
446 /// Atomic variant of read_scalar_at_offset.
447 fn read_scalar_at_offset_atomic(
451 layout: TyAndLayout<'tcx>,
452 atomic: AtomicReadOp,
453 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
454 let this = self.eval_context_ref();
455 let op_place = this.deref_operand(op)?;
456 let offset = Size::from_bytes(offset);
458 // Ensure that the following read at an offset is within bounds.
459 assert!(op_place.layout.size >= offset + layout.size);
460 let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
461 this.read_scalar_atomic(value_place, atomic)
464 /// Atomic variant of write_scalar_at_offset.
465 fn write_scalar_at_offset_atomic(
469 value: impl Into<ScalarMaybeUninit<Tag>>,
470 layout: TyAndLayout<'tcx>,
471 atomic: AtomicWriteOp,
472 ) -> InterpResult<'tcx> {
473 let this = self.eval_context_mut();
474 let op_place = this.deref_operand(op)?;
475 let offset = Size::from_bytes(offset);
// Ensure that the following write at an offset is within bounds.
478 assert!(op_place.layout.size >= offset + layout.size);
479 let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
480 this.write_scalar_atomic(value.into(), value_place, atomic)
483 /// Perform an atomic read operation at the memory location.
484 fn read_scalar_atomic(
486 place: MPlaceTy<'tcx, Tag>,
487 atomic: AtomicReadOp,
488 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
489 let this = self.eval_context_ref();
490 let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place.into()))?;
491 self.validate_atomic_load(place, atomic)?;
495 /// Perform an atomic write operation at the memory location.
496 fn write_scalar_atomic(
498 val: ScalarMaybeUninit<Tag>,
499 dest: MPlaceTy<'tcx, Tag>,
500 atomic: AtomicWriteOp,
501 ) -> InterpResult<'tcx> {
502 let this = self.eval_context_mut();
503 this.allow_data_races_mut(move |this| this.write_scalar(val, dest.into()))?;
504 self.validate_atomic_store(dest, atomic)
/// Perform an atomic operation on a memory location.
508 fn atomic_op_immediate(
510 place: MPlaceTy<'tcx, Tag>,
511 rhs: ImmTy<'tcx, Tag>,
515 ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
516 let this = self.eval_context_mut();
518 let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
520 // Atomics wrap around on overflow.
521 let val = this.binary_op(op, old, rhs)?;
522 let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
523 this.allow_data_races_mut(|this| this.write_immediate(*val, place.into()))?;
525 this.validate_atomic_rmw(place, atomic)?;
/// Perform an atomic exchange with a memory place and a new
/// scalar value; the old value is returned.
531 fn atomic_exchange_scalar(
533 place: MPlaceTy<'tcx, Tag>,
534 new: ScalarMaybeUninit<Tag>,
536 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
537 let this = self.eval_context_mut();
539 let old = this.allow_data_races_mut(|this| this.read_scalar(place.into()))?;
540 this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
541 this.validate_atomic_rmw(place, atomic)?;
545 /// Perform an atomic compare and exchange at a given memory location.
546 /// On success an atomic RMW operation is performed and on failure
547 /// only an atomic read occurs.
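///
/// For reference, the user-facing shape of the operation being modelled (a sketch
/// using std atomics, not code from this module); the success case is validated
/// as an RMW and the failure case as an atomic load:
/// ```rust
/// use std::sync::atomic::{AtomicU32, Ordering};
///
/// fn main() {
///     let x = AtomicU32::new(5);
///     // Success ordering AcqRel, failure ordering Acquire.
///     match x.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire) {
///         Ok(old) => assert_eq!(old, 5),
///         Err(_) => unreachable!(),
///     }
///     assert_eq!(x.load(Ordering::Relaxed), 10);
/// }
/// ```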
548 fn atomic_compare_exchange_scalar(
550 place: MPlaceTy<'tcx, Tag>,
551 expect_old: ImmTy<'tcx, Tag>,
552 new: ScalarMaybeUninit<Tag>,
555 ) -> InterpResult<'tcx, Immediate<Tag>> {
556 let this = self.eval_context_mut();
// The failure ordering cannot be stronger than the success ordering, so conceptually this
// first attempts the read with the failure ordering; only if the comparison succeeds is the
// access re-validated with the success (read-modify-write) ordering and the write performed.
// Read as immediate for the sake of `binary_op()`.
562 let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
564 // `binary_op` will bail if either of them is not a scalar.
565 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
566 let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
// Update the place depending on the comparison:
// if successful, perform a full rw-atomic validation,
// otherwise treat this as an atomic load with the fail ordering.
572 this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
573 this.validate_atomic_rmw(place, success)?;
575 this.validate_atomic_load(place, fail)?;
578 // Return the old value.
582 /// Update the data-race detector for an atomic read occurring at the
583 /// associated memory-place and on the current thread.
584 fn validate_atomic_load(
586 place: MPlaceTy<'tcx, Tag>,
587 atomic: AtomicReadOp,
588 ) -> InterpResult<'tcx> {
589 let this = self.eval_context_ref();
590 this.validate_atomic_op(
594 move |memory, clocks, index, atomic| {
595 if atomic == AtomicReadOp::Relaxed {
596 memory.load_relaxed(&mut *clocks, index)
598 memory.load_acquire(&mut *clocks, index)
604 /// Update the data-race detector for an atomic write occurring at the
605 /// associated memory-place and on the current thread.
606 fn validate_atomic_store(
608 place: MPlaceTy<'tcx, Tag>,
609 atomic: AtomicWriteOp,
610 ) -> InterpResult<'tcx> {
611 let this = self.eval_context_ref();
612 this.validate_atomic_op(
616 move |memory, clocks, index, atomic| {
617 if atomic == AtomicWriteOp::Relaxed {
618 memory.store_relaxed(clocks, index)
620 memory.store_release(clocks, index)
626 /// Update the data-race detector for an atomic read-modify-write occurring
627 /// at the associated memory place and on the current thread.
628 fn validate_atomic_rmw(
630 place: MPlaceTy<'tcx, Tag>,
632 ) -> InterpResult<'tcx> {
634 let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
635 let release = matches!(atomic, Release | AcqRel | SeqCst);
636 let this = self.eval_context_ref();
637 this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
639 memory.load_acquire(clocks, index)?;
641 memory.load_relaxed(clocks, index)?;
644 memory.rmw_release(clocks, index)
646 memory.rmw_relaxed(clocks, index)
651 /// Update the data-race detector for an atomic fence on the current thread.
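///
/// For intuition, the release half of this models the std pattern below (a sketch,
/// not code from this module), where a release fence followed by a relaxed store
/// publishes everything before the fence:
/// ```rust
/// use std::sync::atomic::{fence, AtomicBool, AtomicU32, Ordering};
/// use std::thread;
///
/// static DATA: AtomicU32 = AtomicU32::new(0);
/// static FLAG: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     let t = thread::spawn(|| {
///         DATA.store(1, Ordering::Relaxed);
///         // `apply_release_fence` snapshots the thread clock; the
///         // relaxed store then publishes that snapshot.
///         fence(Ordering::Release);
///         FLAG.store(true, Ordering::Relaxed);
///     });
///     while !FLAG.load(Ordering::Acquire) {}
///     assert_eq!(DATA.load(Ordering::Relaxed), 1);
///     t.join().unwrap();
/// }
/// ```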
652 fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
653 let this = self.eval_context_mut();
654 if let Some(data_race) = &this.memory.extra.data_race {
655 data_race.maybe_perform_sync_operation(move |index, mut clocks| {
656 log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
// Apply data-race detection for the current fences;
// this treats AcqRel and SeqCst the same as an acquire
// and release fence applied in the same timestamp.
661 if atomic != AtomicFenceOp::Release {
662 // Either Acquire | AcqRel | SeqCst
663 clocks.apply_acquire_fence();
665 if atomic != AtomicFenceOp::Acquire {
666 // Either Release | AcqRel | SeqCst
667 clocks.apply_release_fence();
670 // Increment timestamp in case of release semantics.
671 Ok(atomic != AtomicFenceOp::Acquire)
678 fn reset_vector_clocks(
682 ) -> InterpResult<'tcx> {
683 let this = self.eval_context_mut();
684 if let Some(data_race) = &mut this.memory.extra.data_race {
685 if data_race.multi_threaded.get() {
686 let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
687 alloc_meta.reset_clocks(ptr.offset, size);
694 /// Vector clock metadata for a logical memory allocation.
695 #[derive(Debug, Clone)]
696 pub struct VClockAlloc {
697 /// Assigning each byte a MemoryCellClocks.
698 alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
700 /// Pointer to global state.
705 /// Create a new data-race detector for newly allocated memory.
706 pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
707 let (alloc_timestamp, alloc_index) = match kind {
708 // User allocated and stack memory should track allocation.
710 MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
711 ) | MemoryKind::Stack => {
712 let (alloc_index, clocks) = global.current_thread_state();
713 let alloc_timestamp = clocks.clock[alloc_index];
714 (alloc_timestamp, alloc_index)
// Other global memory should track races but be allocated at the 0 timestamp.
718 MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
719 MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
720 ) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
721 (0, VectorIdx::MAX_INDEX)
725 global: Rc::clone(global),
726 alloc_ranges: RefCell::new(RangeMap::new(
727 len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
732 fn reset_clocks(&mut self, offset: Size, len: Size) {
733 let mut alloc_ranges = self.alloc_ranges.borrow_mut();
734 for (_, range) in alloc_ranges.iter_mut(offset, len) {
735 // Reset the portion of the range
736 *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
// Find an index, if one exists, where the value
// in `l` is greater than the value in `r`.
742 fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
743 log::trace!("Find index where not {:?} <= {:?}", l, r);
744 let l_slice = l.as_slice();
745 let r_slice = r.as_slice();
750 .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
752 if l_slice.len() > r_slice.len() {
// By invariant, if l_slice is longer,
// then one element must be larger.
// This just validates that this is true
// and reports earlier elements first.
757 let l_remainder_slice = &l_slice[r_slice.len()..];
758 let idx = l_remainder_slice
761 .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
762 .expect("Invalid VClock Invariant");
763 Some(idx + r_slice.len())
768 .map(|idx| VectorIdx::new(idx))
/// Report a data-race found in the program.
/// This finds the two racing threads and the type
/// of data-race that occurred. This will also
/// return info about the memory location the data-race
/// occurred on.
778 fn report_data_race<'tcx>(
779 global: &MemoryExtra,
780 range: &MemoryCellClocks,
783 pointer: Pointer<Tag>,
785 ) -> InterpResult<'tcx> {
786 let (current_index, current_clocks) = global.current_thread_state();
788 let (other_action, other_thread, other_clock) = if range.write
789 > current_clocks.clock[range.write_index]
791 // Convert the write action into the vector clock it
792 // represents for diagnostic purposes.
793 write_clock = VClock::new_with_index(range.write_index, range.write);
794 (range.write_type.get_descriptor(), range.write_index, &write_clock)
795 } else if let Some(idx) = Self::find_gt_index(&range.read, ¤t_clocks.clock) {
796 ("Read", idx, &range.read)
797 } else if !is_atomic {
798 if let Some(atomic) = range.atomic() {
799 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, ¤t_clocks.clock)
801 ("Atomic Store", idx, &atomic.write_vector)
802 } else if let Some(idx) =
803 Self::find_gt_index(&atomic.read_vector, ¤t_clocks.clock)
805 ("Atomic Load", idx, &atomic.read_vector)
808 "Failed to report data-race for non-atomic operation: no race found"
813 "Failed to report data-race for non-atomic operation: no atomic component"
817 unreachable!("Failed to report data-race for atomic operation")
820 // Load elaborated thread information about the racing thread actions.
821 let current_thread_info = global.print_thread_metadata(current_index);
822 let other_thread_info = global.print_thread_metadata(other_thread);
// Throw the data-race error.
826 "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
827 \n\t\t -current vector clock = {:?}\
828 \n\t\t -conflicting timestamp = {:?}",
834 pointer.offset.bytes(),
836 current_clocks.clock,
/// Detect data-races for an unsynchronized read operation. This will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or because detection is temporarily disabled during a racy read or write
/// operation for which data-race detection is handled separately, for example
/// atomic read operations.
846 pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
847 if self.global.multi_threaded.get() {
848 let (index, clocks) = self.global.current_thread_state();
849 let mut alloc_ranges = self.alloc_ranges.borrow_mut();
850 for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
851 if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
853 return Self::report_data_race(
869 // Shared code for detecting data-races on unique access to a section of memory
870 fn unique_access<'tcx>(
872 pointer: Pointer<Tag>,
874 write_type: WriteType,
875 ) -> InterpResult<'tcx> {
876 if self.global.multi_threaded.get() {
877 let (index, clocks) = self.global.current_thread_state();
878 for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
879 if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
881 return Self::report_data_race(
884 write_type.get_descriptor(),
/// Detect data-races for an unsynchronized write operation. This will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or because detection is temporarily disabled during a racy read or write
/// operation.
901 pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
902 self.unique_access(pointer, len, WriteType::Write)
/// Detect data-races for an unsynchronized deallocate operation. This will not perform
/// data-race detection if `multi-threaded` is false, either due to no threads
/// being created or because detection is temporarily disabled during a racy read or write
/// operation.
909 pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
910 self.unique_access(pointer, len, WriteType::Deallocate)
914 impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
915 trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
// Temporarily allow data-races to occur. This should only be
// used if either one of the appropriate `validate_atomic` functions
// will be called to treat a memory access as atomic, or if the memory
// being accessed should be treated as internal state that cannot be
// accessed by the interpreted program.
922 fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
923 let this = self.eval_context_ref();
924 let old = if let Some(data_race) = &this.memory.extra.data_race {
925 data_race.multi_threaded.replace(false)
929 let result = op(this);
930 if let Some(data_race) = &this.memory.extra.data_race {
931 data_race.multi_threaded.set(old);
/// Same as `allow_data_races_ref`: this temporarily disables any data-race detection and
/// so should only be used for atomic operations or internal state that the program cannot
/// access.
940 fn allow_data_races_mut<R>(
942 op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
944 let this = self.eval_context_mut();
945 let old = if let Some(data_race) = &this.memory.extra.data_race {
946 data_race.multi_threaded.replace(false)
950 let result = op(this);
951 if let Some(data_race) = &this.memory.extra.data_race {
952 data_race.multi_threaded.set(old);
/// Generic atomic operation implementation;
/// this accesses memory via `get_raw` instead of
/// `get_raw_mut`, due to issues calling `get_raw_mut`
/// for atomic loads from read-only memory.
/// FIXME: is this valid, or should `get_raw_mut` be used for
/// atomic stores / atomic RMWs?
963 fn validate_atomic_op<A: Debug + Copy>(
965 place: MPlaceTy<'tcx, Tag>,
969 &mut MemoryCellClocks,
973 ) -> Result<(), DataRace>,
974 ) -> InterpResult<'tcx> {
975 let this = self.eval_context_ref();
976 if let Some(data_race) = &this.memory.extra.data_race {
977 if data_race.multi_threaded.get() {
978 // Load and log the atomic operation.
979 let place_ptr = place.ptr.assert_ptr();
980 let size = place.layout.size;
982 &this.memory.get_raw(place_ptr.alloc_id)?.extra.data_race.as_ref().unwrap();
984 "Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
988 place_ptr.offset.bytes(),
992 // Perform the atomic operation.
993 let data_race = &alloc_meta.global;
994 data_race.maybe_perform_sync_operation(|index, mut clocks| {
996 alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size)
998 if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
1000 return VClockAlloc::report_data_race(
1011 // This conservatively assumes all operations have release semantics
1015 // Log changes to atomic memory.
1016 if log::log_enabled!(log::Level::Trace) {
1017 for (_, range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size)
1020 "Updated atomic memory({:?}, offset={}, size={}) to {:#?}",
1021 place.ptr.assert_ptr().alloc_id,
1022 place_ptr.offset.bytes(),
1034 /// Extra metadata associated with a thread.
1035 #[derive(Debug, Clone, Default)]
1036 struct ThreadExtraState {
/// The current vector index in use by the
/// thread; this is set to None
/// after the vector index has been re-used
/// and hence the value will never need to be
/// read during data-race reporting.
1042 vector_index: Option<VectorIdx>,
/// The name of the thread, updated for better
/// diagnostics when reporting detected data-races.
1047 thread_name: Option<Box<str>>,
/// Thread termination vector clock; this
/// is set on thread termination and is used
/// for joining on threads since the vector_index
/// may be re-used when the join operation occurs.
1053 termination_vector_clock: Option<VClock>,
1056 /// Global data-race detection state, contains the currently
1057 /// executing thread as well as the vector-clocks associated
1058 /// with each of the threads.
1059 #[derive(Debug, Clone)]
1060 pub struct GlobalState {
/// Set to true once the first additional
/// thread has launched, due to the dependency
/// between before and after a thread launch.
/// Any data-races must be recorded after this
/// so concurrent execution can ignore recording
/// any data-races.
1067 multi_threaded: Cell<bool>,
/// Mapping of a vector index to a known set of thread
/// clocks; this is not directly a mapping from a thread id,
/// since it may refer to multiple threads.
1072 vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,
/// Mapping of a given vector index to the current thread
/// that the execution is representing; this may change
/// if a vector index is re-assigned to a new thread.
1077 vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,
1079 /// The mapping of a given thread to associated thread metadata.
1080 thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,
1082 /// The current vector index being executed.
1083 current_index: Cell<VectorIdx>,
/// Potential vector indices that could be re-used on thread creation;
/// values are inserted here after the thread has terminated and
/// been joined with, and hence may potentially become free
/// for use as the index for a new thread.
/// Elements in this set may still require the vector index to
/// report data-races, and can only be re-used after all
/// active vector-clocks catch up with the thread's timestamp.
1092 reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
/// Counts the number of threads that are currently active.
/// If the number of active threads reduces to 1 and then
/// a join operation occurs with the remaining main thread,
/// then multi-threaded execution may be disabled.
1098 active_thread_count: Cell<usize>,
/// This contains threads that have terminated, but have not yet been joined,
/// and so cannot become re-use candidates until a join operation
/// occurs.
/// The associated vector index will be moved into re-use candidates
/// after the join operation occurs.
1105 terminated_threads: RefCell<FxHashMap<ThreadId, VectorIdx>>,
/// Create a new global state, set up with just thread-id=0
/// advanced to timestamp = 1.
1111 pub fn new() -> Self {
1112 let global_state = GlobalState {
1113 multi_threaded: Cell::new(false),
1114 vector_clocks: RefCell::new(IndexVec::new()),
1115 vector_info: RefCell::new(IndexVec::new()),
1116 thread_info: RefCell::new(IndexVec::new()),
1117 current_index: Cell::new(VectorIdx::new(0)),
1118 active_thread_count: Cell::new(1),
1119 reuse_candidates: RefCell::new(FxHashSet::default()),
1120 terminated_threads: RefCell::new(FxHashMap::default()),
// Set up the main-thread since it is not explicitly created:
// it uses vector index and thread-id 0; the Rust runtime gives
// the main-thread the name "main".
1126 let index = global_state.vector_clocks.borrow_mut().push(ThreadClockSet::default());
1127 global_state.vector_info.borrow_mut().push(ThreadId::new(0));
1128 global_state.thread_info.borrow_mut().push(ThreadExtraState {
1129 vector_index: Some(index),
1130 thread_name: Some("main".to_string().into_boxed_str()),
1131 termination_vector_clock: None,
1137 // Try to find vector index values that can potentially be re-used
1138 // by a new thread instead of a new vector index being created.
1139 fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
1140 let mut reuse = self.reuse_candidates.borrow_mut();
1141 let vector_clocks = self.vector_clocks.borrow();
1142 let vector_info = self.vector_info.borrow();
1143 let terminated_threads = self.terminated_threads.borrow();
1144 for &candidate in reuse.iter() {
1145 let target_timestamp = vector_clocks[candidate].clock[candidate];
1146 if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
1147 // The thread happens before the clock, and hence cannot report
// a data-race with the candidate index.
1149 let no_data_race = clock.clock[candidate] >= target_timestamp;
1151 // The vector represents a thread that has terminated and hence cannot
1152 // report a data-race with the candidate index.
1153 let thread_id = vector_info[clock_idx];
1154 let vector_terminated =
1155 reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);
1157 // The vector index cannot report a race with the candidate index
1158 // and hence allows the candidate index to be re-used.
1159 no_data_race || vector_terminated
// All vector clocks for each vector index are at least equal to
// the target timestamp, and the thread is known to have
// terminated, therefore this vector clock index cannot
// report any more data-races.
1165 assert!(reuse.remove(&candidate));
1166 return Some(candidate);
// Hook for thread creation: enables multi-threaded execution and marks
// the current thread timestamp as happening-before the created thread.
1175 pub fn thread_created(&self, thread: ThreadId) {
1176 let current_index = self.current_index();
1178 // Increment the number of active threads.
1179 let active_threads = self.active_thread_count.get();
1180 self.active_thread_count.set(active_threads + 1);
// Enable multi-threaded execution: there are now at least two threads,
// so data-races are now possible.
1184 self.multi_threaded.set(true);
// Load and set up the associated thread metadata.
1187 let mut thread_info = self.thread_info.borrow_mut();
1188 thread_info.ensure_contains_elem(thread, Default::default);
1190 // Assign a vector index for the thread, attempting to re-use an old
1191 // vector index that can no longer report any data-races if possible.
1192 let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
// Now re-configure the re-use candidate: increment the clock
// for the new synchronized use of the vector.
1195 let mut vector_clocks = self.vector_clocks.borrow_mut();
1196 vector_clocks[reuse_index].increment_clock(reuse_index);
1198 // Locate the old thread the vector was associated with and update
1199 // it to represent the new thread instead.
1200 let mut vector_info = self.vector_info.borrow_mut();
1201 let old_thread = vector_info[reuse_index];
1202 vector_info[reuse_index] = thread;
1204 // Mark the thread the vector index was associated with as no longer
1205 // representing a thread index.
1206 thread_info[old_thread].vector_index = None;
// No vector re-use candidates available; instead create
// a new vector index.
1212 let mut vector_info = self.vector_info.borrow_mut();
1213 vector_info.push(thread)
1216 log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
1218 // Mark the chosen vector index as in use by the thread.
1219 thread_info[thread].vector_index = Some(created_index);
1221 // Create a thread clock set if applicable.
1222 let mut vector_clocks = self.vector_clocks.borrow_mut();
1223 if created_index == vector_clocks.next_index() {
1224 vector_clocks.push(ThreadClockSet::default());
1227 // Now load the two clocks and configure the initial state.
1228 let (current, created) = vector_clocks.pick2_mut(current_index, created_index);
// Join the created with current, since the current thread's
1231 // previous actions happen-before the created thread.
1232 created.join_with(current);
1234 // Advance both threads after the synchronized operation.
1235 // Both operations are considered to have release semantics.
1236 current.increment_clock(current_index);
1237 created.increment_clock(created_index);
1240 /// Hook on a thread join to update the implicit happens-before relation
1241 /// between the joined thread and the current thread.
1243 pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
1244 let mut clocks_vec = self.vector_clocks.borrow_mut();
1245 let thread_info = self.thread_info.borrow();
1247 // Load the vector clock of the current thread.
1248 let current_index = thread_info[current_thread]
1250 .expect("Performed thread join on thread with no assigned vector");
1251 let current = &mut clocks_vec[current_index];
1253 // Load the associated vector clock for the terminated thread.
1254 let join_clock = thread_info[join_thread]
1255 .termination_vector_clock
1257 .expect("Joined with thread but thread has not terminated");
// The join thread happens-before the current thread,
// so update the current vector clock.
// This is not a release operation, so the clock is not incremented.
1263 current.clock.join(join_clock);
// Check the number of active threads; if the value is 1,
// then test for potentially disabling multi-threaded execution.
1267 let active_threads = self.active_thread_count.get();
1268 if active_threads == 1 {
1269 // May potentially be able to disable multi-threaded execution.
1270 let current_clock = &clocks_vec[current_index];
1273 .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
1275 // All thread terminations happen-before the current clock
1276 // therefore no data-races can be reported until a new thread
1277 // is created, so disable multi-threaded execution.
1278 self.multi_threaded.set(false);
1282 // If the thread is marked as terminated but not joined
1283 // then move the thread to the re-use set.
1284 let mut termination = self.terminated_threads.borrow_mut();
1285 if let Some(index) = termination.remove(&join_thread) {
1286 let mut reuse = self.reuse_candidates.borrow_mut();
1287 reuse.insert(index);
/// On thread termination, the vector clock may be re-used
/// in the future once all remaining thread-clocks catch
/// up with the time index of the terminated thread.
/// This assigns the thread termination a unique timestamp,
/// which will be used to join the thread.
/// This should be called strictly before any calls to
/// `thread_joined`.
1299 pub fn thread_terminated(&self) {
1300 let current_index = self.current_index();
1302 // Increment the clock to a unique termination timestamp.
1303 let mut vector_clocks = self.vector_clocks.borrow_mut();
1304 let current_clocks = &mut vector_clocks[current_index];
1305 current_clocks.increment_clock(current_index);
1307 // Load the current thread id for the executing vector.
1308 let vector_info = self.vector_info.borrow();
1309 let current_thread = vector_info[current_index];
// Load the current thread metadata, and move to a terminated
// vector state: set up the vector clock that all join operations
// will load and use.
1314 let mut thread_info = self.thread_info.borrow_mut();
1315 let current = &mut thread_info[current_thread];
1316 current.termination_vector_clock = Some(current_clocks.clock.clone());
// Add this thread as a candidate for re-use after a thread join
// occurs.
1320 let mut termination = self.terminated_threads.borrow_mut();
1321 termination.insert(current_thread, current_index);
// Reduce the number of active threads, now that a thread has
// terminated.
1325 let mut active_threads = self.active_thread_count.get();
1326 active_threads -= 1;
1327 self.active_thread_count.set(active_threads);
/// Hook for updating the local tracker of the currently
/// active thread; this should always be updated whenever
/// `active_thread` in thread.rs is updated.
1334 pub fn thread_set_active(&self, thread: ThreadId) {
1335 let thread_info = self.thread_info.borrow();
1336 let vector_idx = thread_info[thread]
1338 .expect("Setting thread active with no assigned vector");
1339 self.current_index.set(vector_idx);
/// Hook for updating the local tracker of the thread's name;
/// this should always mirror the local value in thread.rs.
/// The thread name is used for improved diagnostics
/// during data-race reporting.
1347 pub fn thread_set_name(&self, thread: ThreadId, name: String) {
1348 let name = name.into_boxed_str();
1349 let mut thread_info = self.thread_info.borrow_mut();
1350 thread_info[thread].thread_name = Some(name);
/// Attempt to perform a synchronized operation; this
/// will perform no operation if multi-threading is
/// not currently enabled.
/// Otherwise it runs the operation and, if the operation
/// reports that it had release semantics, increments the clock
/// of the current vector afterwards, so that later accesses are
/// ordered after any happens-before edges the operation may create.
1360 fn maybe_perform_sync_operation<'tcx>(
1362 op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
1363 ) -> InterpResult<'tcx> {
1364 if self.multi_threaded.get() {
1365 let (index, clocks) = self.current_thread_state_mut();
1366 if op(index, clocks)? {
1367 let (_, mut clocks) = self.current_thread_state_mut();
1368 clocks.increment_clock(index);
/// Internal utility to identify a thread stored internally;
/// returns the id and the name for better diagnostics.
1376 fn print_thread_metadata(&self, vector: VectorIdx) -> String {
1377 let thread = self.vector_info.borrow()[vector];
1378 let thread_name = &self.thread_info.borrow()[thread].thread_name;
1379 if let Some(name) = thread_name {
1380 let name: &str = name;
1381 format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
1383 format!("Thread(id = {:?})", thread.to_u32())
/// Acquire a lock, expressing that the previous call of
/// `validate_lock_release` must happen before this.
/// As this is an acquire operation, the thread timestamp is not
/// incremented.
1391 pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
1392 let (_, mut clocks) = self.load_thread_state_mut(thread);
1393 clocks.clock.join(&lock);
1396 /// Release a lock handle, express that this happens-before
1397 /// any subsequent calls to `validate_lock_acquire`.
/// For normal locks this should be equivalent to `validate_lock_release_shared`,
/// since an acquire operation should have occurred before; however,
/// for futex & condvar operations this is not the case and this
/// operation must be used.
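///
/// A minimal sketch of the release/acquire pairing with simplified types (plain
/// `Vec<u32>` clocks and hypothetical free functions):
/// ```rust
/// fn release(lock: &mut Vec<u32>, thread: &mut Vec<u32>, idx: usize) {
///     *lock = thread.clone(); // `clone_from` in the real code
///     thread[idx] += 1;       // release => increment the clock
/// }
///
/// fn acquire(lock: &[u32], thread: &mut Vec<u32>) {
///     // Join: acquire does not increment the clock.
///     for (t, l) in thread.iter_mut().zip(lock) {
///         *t = (*t).max(*l);
///     }
/// }
///
/// fn main() {
///     let mut lock = vec![0, 0];
///     let (mut t0, mut t1) = (vec![3, 0], vec![0, 5]);
///     release(&mut lock, &mut t0, 0);
///     acquire(&lock, &mut t1);
///     // Thread 1 now sees everything thread 0 did before unlocking.
///     assert_eq!(t1, vec![3, 5]);
/// }
/// ```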
1402 pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
1403 let (index, mut clocks) = self.load_thread_state_mut(thread);
1404 lock.clone_from(&clocks.clock);
1405 clocks.increment_clock(index);
1408 /// Release a lock handle, express that this happens-before
1409 /// any subsequent calls to `validate_lock_acquire` as well
1410 /// as any previous calls to this function after any
1411 /// `validate_lock_release` calls.
1412 /// For normal locks this should be equivalent to `validate_lock_release`.
1413 /// This function only exists for joining over the set of concurrent readers
1414 /// in a read-write lock and should not be used for anything else.
1415 pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
1416 let (index, mut clocks) = self.load_thread_state_mut(thread);
1417 lock.join(&clocks.clock);
1418 clocks.increment_clock(index);
1421 /// Load the vector index used by the given thread as well as the set of vector clocks
1422 /// used by the thread.
1424 fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1425 let index = self.thread_info.borrow()[thread]
1427 .expect("Loading thread state for thread with no assigned vector");
1428 let ref_vector = self.vector_clocks.borrow_mut();
1429 let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1433 /// Load the current vector clock in use and the current set of thread clocks
1434 /// in use for the vector.
1436 fn current_thread_state(&self) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1437 let index = self.current_index();
1438 let ref_vector = self.vector_clocks.borrow();
1439 let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1443 /// Load the current vector clock in use and the current set of thread clocks
1444 /// in use for the vector mutably for modification.
1446 fn current_thread_state_mut(&self) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1447 let index = self.current_index();
1448 let ref_vector = self.vector_clocks.borrow_mut();
1449 let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
/// Return the current vector index; should be the same
/// as the data-race active thread.
1456 fn current_index(&self) -> VectorIdx {
1457 self.current_index.get()