//! Implementation of a data-race detector using Lamport Timestamps /
//! Vector clocks, based on "Dynamic Race Detection for C++11":
//! - https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf
//! which extends data-race detection to work correctly with fences.
//!
//! This does not explore weak memory orders and so can still miss data-races,
//! but should not report false positives.
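//!
//! As intuition for the vector-clock approach: if thread 0 writes a location
//! while its clock is `[1, 0]` and thread 1 later reads it with clock `[0, 2]`,
//! and no synchronization orders the two, then neither clock is `<=` the
//! other, so the accesses are concurrent and the pair is reported as a
//! data-race.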
use std::{
    cell::{Cell, Ref, RefCell, RefMut},
    cmp::Ordering,
    fmt::{self, Debug},
    ops::Index,
    rc::Rc,
};

use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_target::abi::Size;
use smallvec::SmallVec;

// Crate-local types and helpers used below (Tag, ThreadId, RangeMap, place
// and interpreter types); `use crate::*` is the usual Miri module convention.
use crate::*;
pub type AllocExtra = VClockAlloc;
pub type MemoryExtra = Rc<GlobalState>;
/// Valid atomic read-write operations: a local mirror of atomic::Ordering,
/// re-declared here so it can be matched on exhaustively
/// (unlike the std enum, it is not #[non_exhaustive]).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRWOp {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Valid atomic read operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
    Relaxed,
    Acquire,
    SeqCst,
}

/// Valid atomic write operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
    Relaxed,
    Release,
    SeqCst,
}

/// Valid atomic fence operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Variant of `read_immediate` that does not perform data-race checks.
    fn read_immediate_racy(&self, op: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
        let this = self.eval_context_ref();
        let data_race = &*this.memory.extra.data_race;
        let old = data_race.multi_threaded.get();

        data_race.multi_threaded.set(false);
        let res = this.read_immediate(op.into());

        data_race.multi_threaded.set(old);
        res
    }
    /// Variant of `write_immediate` that does not perform data-race checks.
    fn write_immediate_racy(
        &mut self, src: Immediate<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        let old = data_race.multi_threaded.get();

        data_race.multi_threaded.set(false);
        let imm = this.write_immediate(src, dest.into());

        let data_race = &*this.memory.extra.data_race;
        data_race.multi_threaded.set(old);
        imm
    }
    /// Variant of `read_scalar` that does not perform data-race checks.
    fn read_scalar_racy(
        &self, op: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        Ok(self.read_immediate_racy(op)?.to_scalar_or_uninit())
    }

    /// Variant of `write_scalar` that does not perform data-race checks.
    fn write_scalar_racy(
        &mut self, val: ScalarMaybeUninit<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        self.write_immediate_racy(Immediate::Scalar(val.into()), dest)
    }
    /// Variant of the `read_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn read_scalar_at_offset_racy(
        &self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following read at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.read_scalar_racy(value_place.into())
    }

    /// Variant of the `write_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn write_scalar_at_offset_racy(
        &mut self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        value: impl Into<ScalarMaybeUninit<Tag>>,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following write at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.write_scalar_racy(value.into(), value_place.into())
    }
    /// Load the data-race allocation state for a given memory place;
    /// also returns the size and offset of the result in the allocation.
    fn load_data_race_state<'a>(
        &'a mut self, place: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, (&'a mut VClockAlloc, Size, Size)> where 'mir: 'a {
        let this = self.eval_context_mut();

        let ptr = place.ptr.assert_ptr();
        let size = place.layout.size;
        let data_race = &mut this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race;

        Ok((data_race, size, ptr.offset))
    }
    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_load(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicReadOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();
            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            log::trace!(
                "Atomic load on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                alloc.global.current_thread(), atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let mut current_state = alloc.global.current_thread_state_mut();
            if atomic == AtomicReadOp::Relaxed {
                // Perform relaxed atomic load.
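                // (A relaxed load does not join the sync vector into this
                // thread's clock; it only buffers it into `fence_acquire`, so
                // ordering takes effect only if this thread later executes an
                // acquire fence.)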
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.load_relaxed(&mut *current_state);
                }
            } else {
                // Perform acquire (or seq-cst) atomic load.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.acquire(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        " updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_store(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicWriteOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();
            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic store on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            if atomic == AtomicWriteOp::Relaxed {
                // Perform relaxed atomic store.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.store_relaxed(&mut *current_state, current_thread);
                }
            } else {
                // Perform release (or seq-cst) atomic store.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.release(&mut *current_state, current_thread);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        " updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic read-modify-write occurring
    /// at the associated memory place and on the current thread.
    fn validate_atomic_rmw(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicRWOp
    ) -> InterpResult<'tcx> {
        use AtomicRWOp::*;
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();
            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic RMW on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
            let release = matches!(atomic, Release | AcqRel | SeqCst);
            for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                // FIXME: this is probably still slightly wrong due to the quirks
                // in the C++11 memory model.
                if acquire {
                    // Atomic RW-op acquire.
                    range.acquire(&mut *current_state);
                } else {
                    range.load_relaxed(&mut *current_state);
                }
                if release {
                    // Atomic RW-op release.
                    range.rmw_release(&mut *current_state, current_thread);
                } else {
                    range.rmw_relaxed(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        " updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic fence on the current thread.
    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            log::trace!("Atomic fence on {:?} with ordering {:?}", data_race.current_thread(), atomic);
            // Apply data-race detection for the current fences;
            // this treats AcqRel and SeqCst the same as an acquire
            // and release fence applied in the same timestamp.
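            // For intuition, a sketch in terms of hypothetical user code:
            //   thread A: DATA = 1; fence(Release); FLAG.store(1, Relaxed);
            //   thread B: if FLAG.load(Relaxed) == 1 { fence(Acquire); read DATA; }
            // A's release fence snapshots its clock into `fence_release`, the
            // relaxed store publishes that snapshot, and B's acquire fence joins
            // `fence_acquire` into its clock, ordering the accesses to DATA.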
            if atomic != AtomicFenceOp::Release {
                // Either Acquire | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_acquire_fence();
            }
            if atomic != AtomicFenceOp::Acquire {
                // Either Release | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_release_fence();
            }

            data_race.advance_vector_clock();
        }
        Ok(())
    }
}
/// Handle for locks to express their
/// acquire-release semantics.
#[derive(Clone, Debug, Default)]
pub struct DataRaceLockHandle {
    /// Internal acquire-release clock
    /// to express the acquire-release sync
    /// found in concurrency primitives.
    clock: VClock,
}
impl DataRaceLockHandle {
    pub fn set_values(&mut self, other: &Self) {
        self.clock.set_values(&other.clock)
    }
    pub fn reset(&mut self) {
        self.clock.set_zero_vector();
    }
}
/// Avoid an atomic allocation for the common
/// case with atomic operations where the number
/// of active release sequences is small.
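/// (This mirrors the small-size optimization of `SmallVec`: a single release
/// sequence is stored inline, and a `FxHashMap` is allocated only once
/// release sequences from two or more threads are live at the same cell.)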
#[derive(Clone, PartialEq, Eq)]
enum AtomicReleaseSequences {
    /// Contains one or no values;
    /// if empty: (None, reset vector clock),
    /// if one: (Some(thread), thread_clock).
    ReleaseOneOrEmpty(Option<ThreadId>, VClock),

    /// Contains two or more values
    /// stored in a hash-map of thread id to
    /// the vector clock of that thread's release sequence.
    ReleaseMany(FxHashMap<ThreadId, VClock>)
}
impl AtomicReleaseSequences {
    /// Return an empty set of atomic release sequences.
    #[inline]
    fn new() -> AtomicReleaseSequences {
        Self::ReleaseOneOrEmpty(None, VClock::default())
    }

    /// Remove all values except for the value stored at `thread` and set
    /// the vector clock to the associated `clock` value.
    #[inline]
    fn clear_and_set(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                *id = Some(thread);
                rel_clock.set_values(clock);
            }
            Self::ReleaseMany(_) => {
                *self = Self::ReleaseOneOrEmpty(Some(thread), clock.clone());
            }
        }
    }

    /// Remove all values except for the value stored at `thread`.
    #[inline]
    fn clear_and_retain(&mut self, thread: ThreadId) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                // Keep or forget depending on id.
                if *id == Some(thread) {
                    // Retain the release sequence at `thread`.
                } else {
                    *id = None;
                    rel_clock.set_zero_vector();
                }
            }
            Self::ReleaseMany(hash_map) => {
                // Retain only the thread element, so reduce to size
                // of 1 or 0, and move to the smaller format.
                if let Some(clock) = hash_map.remove(&thread) {
                    *self = Self::ReleaseOneOrEmpty(Some(thread), clock);
                } else {
                    *self = Self::ReleaseOneOrEmpty(None, VClock::default());
                }
            }
        }
    }

    /// Insert a release sequence at `thread` with values `clock`.
    fn insert(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                if id.map_or(true, |id| id == thread) {
                    *id = Some(thread);
                    rel_clock.set_values(clock);
                } else {
                    let mut hash_map = FxHashMap::default();
                    hash_map.insert(thread, clock.clone());
                    hash_map.insert(id.unwrap(), rel_clock.clone());
                    *self = Self::ReleaseMany(hash_map);
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.insert(thread, clock.clone());
            }
        }
    }

    /// Return the release sequence at `thread` if one exists.
    #[inline]
    fn load(&self, thread: ThreadId) -> Option<&VClock> {
        match self {
            Self::ReleaseOneOrEmpty(id, clock) => {
                if *id == Some(thread) {
                    Some(clock)
                } else {
                    None
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.get(&thread)
            }
        }
    }
}
/// Custom debug implementation to correctly
/// print debug as a logical mapping from threads
/// to vector clocks.
impl Debug for AtomicReleaseSequences {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::ReleaseOneOrEmpty(None, _) => {
                f.debug_map().finish()
            }
            Self::ReleaseOneOrEmpty(Some(id), clock) => {
                f.debug_map().entry(&id, &clock).finish()
            }
            Self::ReleaseMany(hash_map) => {
                Debug::fmt(hash_map, f)
            }
        }
    }
}
/// Externally stored memory cell clocks,
/// explicitly to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {
    /// Synchronization vector for acquire-release semantics.
    sync_vector: VClock,

    /// The hash-map of all threads for which a release
    /// sequence exists in the memory cell.
    release_sequences: AtomicReleaseSequences,
}
/// Memory cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {
    /// The vector-clock timestamp of the last write.
    write: Timestamp,

    /// The id of the thread that performed the last write to this memory location.
    write_thread: ThreadId,

    /// The vector-clock of the set of previous reads:
    /// each index is set to the timestamp that the associated
    /// thread last read this value.
    read: VClock,

    /// Atomic acquire & release sequence tracking clocks;
    /// for non-atomic memory in the common case this
    /// value is set to None.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}
/// Create a default memory cell clocks instance
/// for uninitialized memory.
impl Default for MemoryCellClocks {
    fn default() -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: 0,
            write_thread: ThreadId::new(u32::MAX as usize),
            atomic_ops: None,
        }
    }
}
impl MemoryCellClocks {
    /// Load the internal atomic memory cells if they exist.
    #[inline]
    fn atomic(&mut self) -> Option<&AtomicMemoryCellClocks> {
        match &self.atomic_ops {
            Some(op) => Some(&*op),
            None => None,
        }
    }

    /// Load or create the internal atomic memory metadata
    /// if it does not exist.
    #[inline]
    fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.get_or_insert_with(|| {
            Box::new(AtomicMemoryCellClocks {
                sync_vector: VClock::default(),
                release_sequences: AtomicReleaseSequences::new(),
            })
        })
    }
    /// Update memory cell data-race tracking for atomic
    /// load acquire semantics; this is a no-op if the memory
    /// was not used previously as atomic memory.
    fn acquire(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.clock.join(&atomic.sync_vector);
        }
    }

    /// Update memory cell data-race tracking for atomic
    /// load relaxed semantics; this is a no-op if the memory
    /// was not used previously as atomic memory.
    fn load_relaxed(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.fence_acquire.join(&atomic.sync_vector);
        }
    }

    /// Update the memory cell data-race tracking for atomic
    /// store release semantics.
    fn release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.clock);
        atomic.release_sequences.clear_and_set(thread, &clocks.clock);
    }
    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics.
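    /// A relaxed store publishes only `fence_release` (not the thread's live
    /// clock) and keeps a release sequence only if it is headed by this same
    /// thread. Sketch: after a `release` by thread A, a relaxed store by A
    /// preserves A's release sequence, while a relaxed store by B drops it.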
    fn store_relaxed(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.fence_release);
        if let Some(release) = atomic.release_sequences.load(thread) {
            atomic.sync_vector.join(release);
        }
        atomic.release_sequences.clear_and_retain(thread);
    }
    /// Update the memory cell data-race tracking for atomic
    /// store release semantics for RMW operations.
    fn rmw_release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.clock);
        atomic.release_sequences.insert(thread, &clocks.clock);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics for RMW operations.
    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.fence_release);
    }
    /// Detect races for non-atomic read operations at the current memory cell;
    /// returns true if a data-race is detected.
    fn read_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        if self.write <= clocks.clock[self.write_thread] {
            self.read.set_at_thread(&clocks.clock, thread);
            false
        } else {
            true
        }
    }
    /// Detect races for non-atomic write operations at the current memory cell;
    /// returns true if a data-race is detected.
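    /// For intuition (a sketch): if the last write was stamped 3 by thread 0
    /// but the current thread's clock only holds 2 for thread 0, the write
    /// did not happen-before this access, so a race is reported.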
    fn write_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        if self.write <= clocks.clock[self.write_thread] && self.read <= clocks.clock {
            self.write = clocks.clock[thread];
            self.write_thread = thread;
            self.read.set_zero_vector();
            false
        } else {
            true
        }
    }
}
/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {
    /// Range of vector clocks, mapping to the vector-clock
    /// metadata of the last accesses to the bytes in this allocation.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

    /// Pointer to global state.
    global: MemoryExtra,
}

impl VClockAlloc {
    /// Create a new data-race allocation detector.
    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
        VClockAlloc {
            global: Rc::clone(global),
            alloc_ranges: RefCell::new(
                RangeMap::new(len, MemoryCellClocks::default())
            ),
        }
    }
    /// Report a data-race found in the program:
    /// this finds the two racing threads and the type
    /// of data-race that occurred; it also reports
    /// info about the memory location the data-race
    /// occurred on.
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &MemoryExtra, range: &MemoryCellClocks, action: &str,
        pointer: Pointer<Tag>, len: Size
    ) -> InterpResult<'tcx> {
        let current_thread = global.current_thread();
        let current_state = global.current_thread_state();
        let mut write_clock = VClock::default();
        let (
            other_action, other_thread, other_clock
        ) = if range.write > current_state.clock[range.write_thread] {
            // Create the effective write-clock that the data-race occurred with.
            let wclock = write_clock.get_mut_with_min_len(
                current_state.clock.as_slice().len()
                    .max(range.write_thread.to_u32() as usize + 1)
            );
            wclock[range.write_thread.to_u32() as usize] = range.write;
            ("WRITE", range.write_thread, write_clock.as_slice())
        } else {
            // Find the index in the read-clock that the data-race occurred with.
            let read_slice = range.read.as_slice();
            let clock_slice = current_state.clock.as_slice();
            let conflicting_index = read_slice.iter()
                .zip(clock_slice.iter())
                .enumerate().find_map(|(idx, (&read, &clock))| {
                    if read > clock {
                        Some(idx)
                    } else {
                        None
                    }
                }).unwrap_or_else(|| {
                    assert!(read_slice.len() > clock_slice.len(), "BUG: cannot find read race yet reported data-race");
                    let rest_read = &read_slice[clock_slice.len()..];
                    rest_read.iter().enumerate().find_map(|(idx, &val)| {
                        if val > 0 {
                            Some(idx + clock_slice.len())
                        } else {
                            None
                        }
                    }).expect("Invariant broken for read-slice, no non-0 element at the tail")
                });
            ("READ", ThreadId::new(conflicting_index), range.read.as_slice())
        };

        let current_thread_info = global.print_thread_metadata(current_thread);
        let other_thread_info = global.print_thread_metadata(other_thread);

        // Throw the data-race detection error.
        throw_ub_format!(
            "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
            \n\t\t -current vector clock = {:?}\
            \n\t\t -conflicting timestamp = {:?}",
            action, current_thread_info,
            other_action, other_thread_info,
            pointer.alloc_id, pointer.offset.bytes(), len.bytes(),
            current_state.clock,
            other_clock
        )
    }
    /// Detect data-races for an unsynchronized read operation; it will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();

            // The alloc-ranges are not split; however, changes are not going to be made
            // to the ranges being tested, so this is ok.
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
            for range in alloc_ranges.iter_mut(pointer.offset, len) {
                if range.read_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "READ", pointer, len
                    );
                }
            }
        }
        Ok(())
    }
    /// Detect data-races for an unsynchronized write operation; it will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for range in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "WRITE", pointer, len
                    );
                }
            }
        }
        Ok(())
    }
    /// Detect data-races for an unsynchronized deallocate operation; it will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for range in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "DEALLOCATE", pointer, len
                    );
                }
            }
        }
        Ok(())
    }
}
/// The current set of vector clocks describing the state
/// of a thread: contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
struct ThreadClockSet {
    /// The increasing clock representing timestamps
    /// that happen-before this thread.
    clock: VClock,

    /// The set of timestamps that will happen-before this
    /// thread once it performs an acquire fence.
    fence_acquire: VClock,

    /// The last timestamp of happens-before relations that
    /// have been released by this thread by a fence.
    fence_release: VClock,
}
impl ThreadClockSet {
    /// Apply the effects of a release fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.set_values(&self.clock);
    }

    /// Apply the effects of an acquire fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a
    /// known thread index.
    #[inline]
    fn increment_clock(&mut self, thread: ThreadId) {
        self.clock.increment_thread(thread);
    }

    /// Join the happens-before clock with that of
    /// another thread, used to model thread join
    /// operations.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
/// Global data-race detection state: contains the currently
/// executing thread as well as the vector clocks associated
/// with each of the threads.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set to true once the first additional
    /// thread has launched, due to the dependency
    /// between before and after a thread launch.
    /// Any data-races must be recorded after this
    /// so concurrent execution can ignore recording
    /// any data-races.
    multi_threaded: Cell<bool>,

    /// The current vector clock for all threads;
    /// this includes threads that have terminated.
    thread_clocks: RefCell<IndexVec<ThreadId, ThreadClockSet>>,

    /// Thread name cache for better diagnostics on the reporting
    /// of a data-race.
    thread_names: RefCell<IndexVec<ThreadId, Option<Box<str>>>>,

    /// The current thread being executed;
    /// this is mirrored from the scheduler since
    /// it is required for loading the current vector
    /// clock for data-race detection.
    current_thread_id: Cell<ThreadId>,
}
impl GlobalState {
    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new() -> Self {
        let mut vec = IndexVec::new();
        let thread_id = vec.push(ThreadClockSet::default());
        vec[thread_id].increment_clock(thread_id);
        GlobalState {
            multi_threaded: Cell::new(false),
            thread_clocks: RefCell::new(vec),
            thread_names: RefCell::new(IndexVec::new()),
            current_thread_id: Cell::new(thread_id),
        }
    }
    /// Hook for thread creation: enables multi-threaded execution and marks
    /// the current thread timestamp as happening-before the created thread.
    #[inline]
    pub fn thread_created(&self, thread: ThreadId) {
        // Enable multi-threaded execution mode now that there are at least
        // two threads.
        self.multi_threaded.set(true);
        let current_thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors.ensure_contains_elem(thread, Default::default);
        let (current, created) = vectors.pick2_mut(current_thread, thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);

        // The current thread happens-before the created thread,
        // so update the created vector clock.
        created.join_with(current);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        created.increment_clock(thread);
    }
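    // Example of the effect (clocks shown as [parent, child]): a parent at
    // [3, 0] spawning a child leaves the parent at [5, 0] and the child at
    // [4, 1], so every parent access before the spawn happens-before the
    // child's first step (a sketch; the exact increments follow the code above).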
    /// Hook on a thread join to update the implicit happens-before relation
    /// between the joined thread and the current thread.
    #[inline]
    pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
        let mut vectors = self.thread_clocks.borrow_mut();
        let (current, join) = vectors.pick2_mut(current_thread, join_thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);

        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        current.join_with(join);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);
    }
    /// Hook for updating the local tracker of the currently
    /// enabled thread; should always be updated whenever
    /// `active_thread` in thread.rs is updated.
    #[inline]
    pub fn thread_set_active(&self, thread: ThreadId) {
        self.current_thread_id.set(thread);
    }

    /// Hook for updating the local tracker of the thread's name;
    /// this should always mirror the local value in thread.rs.
    /// The thread name is used for improved diagnostics
    /// during a data-race.
    #[inline]
    pub fn thread_set_name(&self, name: String) {
        let name = name.into_boxed_str();
        let mut names = self.thread_names.borrow_mut();
        let thread = self.current_thread_id.get();
        names.ensure_contains_elem(thread, Default::default);
        names[thread] = Some(name);
    }
    /// Advance the vector clock for a thread;
    /// this is called before and after any atomic/synchronizing operations
    /// that may manipulate state.
    #[inline]
    fn advance_vector_clock(&self) {
        let thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors[thread].increment_clock(thread);

        // Log the increment in the atomic vector clock.
        log::trace!("Atomic vector clock increase for {:?} to {:?}", thread, vectors[thread].clock);
    }
    /// Internal utility to identify a thread stored internally;
    /// returns the id and the name for better diagnostics.
    fn print_thread_metadata(&self, thread: ThreadId) -> String {
        if let Some(Some(name)) = self.thread_names.borrow().get(thread) {
            let name: &str = name;
            format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
        } else {
            format!("Thread(id = {:?})", thread.to_u32())
        }
    }
    /// Acquire a lock, express that the previous call of
    /// `validate_lock_release` must happen before this.
    pub fn validate_lock_acquire(&self, lock: &DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &mut ref_vector[thread];
        clocks.clock.join(&lock.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    pub fn validate_lock_release(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.set_values(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    pub fn validate_lock_release_shared(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.join(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }
    /// Load the thread clock set associated with the current thread.
    #[inline]
    fn current_thread_state(&self) -> Ref<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow();
        let thread = self.current_thread_id.get();
        Ref::map(ref_vector, |vector| &vector[thread])
    }

    /// Load the thread clock set associated with the current thread
    /// mutably for modification.
    #[inline]
    fn current_thread_state_mut(&self) -> RefMut<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow_mut();
        let thread = self.current_thread_id.get();
        RefMut::map(ref_vector, |vector| &mut vector[thread])
    }

    /// Return the current thread; should be the same
    /// as the data-race active thread.
    #[inline]
    fn current_thread(&self) -> ThreadId {
        self.current_thread_id.get()
    }
}
/// The size of the vector clock to store inline;
/// clock vectors larger than this will be stored on the heap.
const SMALL_VECTOR: usize = 4;

/// The type of the timestamps recorded in the data-race detector,
/// set to a type of unsigned integer.
type Timestamp = u32;
/// A vector clock for detecting data-races.
/// Invariants:
///  - the last element in a VClock must not be 0;
///    -- this means that derive(PartialEq & Eq) is correct,
///    -- as there is no implicit zero tail that might be equal,
///    -- and it also simplifies the implementation of PartialOrd.
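/// For example, `[1, 2]` and `[1, 2, 0]` denote the same clock function, so
/// only `[1, 2]` is a valid representation under this invariant.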
#[derive(Clone, PartialEq, Eq, Default, Debug)]
pub struct VClock(SmallVec<[Timestamp; SMALL_VECTOR]>);

impl VClock {
    /// Load the backing slice behind the clock vector.
    #[inline]
    fn as_slice(&self) -> &[Timestamp] {
        self.0.as_slice()
    }
    /// Get a mutable slice to the internal vector with minimum `min_len`
    /// elements; to preserve invariants, the caller must set
    /// the element at index `min_len - 1` to a non-zero value.
    #[inline]
    fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [Timestamp] {
        if self.0.len() < min_len {
            self.0.resize(min_len, 0);
        }
        assert!(self.0.len() >= min_len);
        self.0.as_mut_slice()
    }

    /// Increment the vector clock at a known index.
    #[inline]
    fn increment_index(&mut self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let idx_ref = &mut mut_slice[idx];
        *idx_ref = idx_ref.checked_add(1).expect("Vector clock overflow")
    }
    /// Increment the vector element representing the progress
    /// of execution in the given thread.
    #[inline]
    pub fn increment_thread(&mut self, thread: ThreadId) {
        self.increment_index(thread.to_u32() as usize);
    }
    /// Join the two vector clocks together; this
    /// sets each vector element to the maximum value
    /// of that element in either of the two source clocks.
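    /// For example (a sketch): joining `[0, 1]` with `[1, 0]` yields `[1, 1]`,
    /// and joining `[2, 0, 1]` with `[1, 3]` yields `[2, 3, 1]`.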
    pub fn join(&mut self, other: &Self) {
        let rhs_slice = other.as_slice();
        let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());

        // Element-wise set to maximum.
        for (l, &r) in lhs_slice.iter_mut().zip(rhs_slice.iter()) {
            *l = r.max(*l);
        }
    }
    /// Set the element at a known index
    /// to the value in the other vector clock.
    fn set_at_index(&mut self, other: &Self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let slice = other.as_slice();
        mut_slice[idx] = slice[idx];
    }

    /// Set the value at the given thread's index
    /// to the value stored in the other vector clock.
    #[inline]
    pub fn set_at_thread(&mut self, other: &Self, thread: ThreadId) {
        self.set_at_index(other, thread.to_u32() as usize);
    }
    /// Clear the vector to all zeros, stored as an empty internal
    /// vector.
    #[inline]
    pub fn set_zero_vector(&mut self) {
        self.0.clear();
    }

    /// Set the values stored in this vector clock
    /// to the values stored in another.
    pub fn set_values(&mut self, new_value: &VClock) {
        let new_slice = new_value.as_slice();
        self.0.resize(new_slice.len(), 0);
        self.0.copy_from_slice(new_slice);
    }
}
impl PartialOrd for VClock {
    fn partial_cmp(&self, other: &VClock) -> Option<Ordering> {
        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // Iterate through the combined vector slice,
        // keeping track of the order that is currently possible to satisfy.
        // If an ordering relation is detected to be impossible, then bail and
        // directly return None.
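        // E.g. [1, 2] vs [2, 1]: the first pair suggests Less, the second
        // contradicts it, so the clocks are incomparable and None is returned.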
        let mut iter = lhs_slice.iter().zip(rhs_slice.iter());
        let mut order = match iter.next() {
            Some((lhs, rhs)) => lhs.cmp(rhs),
            None => Ordering::Equal,
        };
        for (l, r) in iter {
            match order {
                Ordering::Equal => order = l.cmp(r),
                Ordering::Less => if l > r {
                    return None
                },
                Ordering::Greater => if l < r {
                    return None
                },
            }
        }

        // Now test if either left or right have trailing elements;
        // by the invariant the trailing elements have at least one
        // non-zero value, so no additional calculation is required
        // to determine the result of the PartialOrder.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        match l_len.cmp(&r_len) {
            // Equal means no additional elements: return the current order.
            Ordering::Equal => Some(order),
            // Right has at least 1 element > than the implicit 0,
            // so the only valid values are Ordering::Less or None.
            Ordering::Less => match order {
                Ordering::Less | Ordering::Equal => Some(Ordering::Less),
                Ordering::Greater => None,
            },
            // Left has at least 1 element > than the implicit 0,
            // so the only valid values are Ordering::Greater or None.
            Ordering::Greater => match order {
                Ordering::Greater | Ordering::Equal => Some(Ordering::Greater),
                Ordering::Less => None,
            },
        }
    }
    fn lt(&self, other: &VClock) -> bool {
        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element in the left tail is
        // greater than the implicit 0 on the right, therefore the result
        // is either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {
            // If any elements on the left are greater than the right
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r; finally,
            // the case where the values are potentially equal needs to be
            // considered, and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l > r {
                    return false
                } else if l < r {
                    equal = false
                }
            }
            !equal
        } else {
            false
        }
    }
    fn le(&self, other: &VClock) -> bool {
        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element in the left tail is
        // greater than the implicit 0 on the right, therefore the result
        // is either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {
            // If any elements on the left are greater than the right
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l > r)
        } else {
            false
        }
    }
    fn gt(&self, other: &VClock) -> bool {
        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element in the right tail is
        // greater than the implicit 0 on the left, therefore the result
        // is either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {
            // If any elements on the left are less than the right
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r; finally,
            // the case where the values are potentially equal needs to be
            // considered, and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l < r {
                    return false
                } else if l > r {
                    equal = false
                }
            }
            !equal
        } else {
            false
        }
    }
    fn ge(&self, other: &VClock) -> bool {
        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element in the right tail is
        // greater than the implicit 0 on the left, therefore the result
        // is either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {
            // If any elements on the left are less than the right
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l < r)
        } else {
            false
        }
    }
}
impl Index<ThreadId> for VClock {
    type Output = Timestamp;

    #[inline]
    fn index(&self, index: ThreadId) -> &Timestamp {
        self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
    }
}
/// Test vector clock ordering operations;
/// data-race detection is tested in the external
/// test suite.
#[cfg(test)]
mod tests {
    use super::{VClock, Timestamp};
    use std::cmp::Ordering;

    #[test]
    fn test_equal() {
        let mut c1 = VClock::default();
        let mut c2 = VClock::default();
        assert_eq!(c1, c2);
        c1.increment_index(5);
        assert_ne!(c1, c2);
        c2.increment_index(53);
        assert_ne!(c1, c2);
        c1.increment_index(53);
        assert_ne!(c1, c2);
        c2.increment_index(5);
        assert_eq!(c1, c2);
    }
    #[test]
    fn test_partial_order() {
        assert_order(&[1], &[1], Some(Ordering::Equal));
        assert_order(&[1], &[2], Some(Ordering::Less));
        assert_order(&[2], &[1], Some(Ordering::Greater));
        assert_order(&[1], &[1,2], Some(Ordering::Less));
        assert_order(&[2], &[1,2], None);

        assert_order(&[400], &[0, 1], None);

        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Equal));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Greater));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], None);
        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
    }
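
    /// Sanity check for `join` (an added example): join is the pointwise
    /// maximum of the two clocks.
    #[test]
    fn test_join() {
        let mut a = from_slice(&[2, 0, 1]);
        a.join(&from_slice(&[1, 3]));
        assert_eq!(a.as_slice(), &[2, 3, 1]);
    }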
    fn from_slice(mut slice: &[Timestamp]) -> VClock {
        while let Some(0) = slice.last() {
            slice = &slice[..slice.len() - 1]
        }
        VClock(smallvec::SmallVec::from_slice(slice))
    }
    fn assert_order(l: &[Timestamp], r: &[Timestamp], o: Option<Ordering>) {
        let l = from_slice(l);
        let r = from_slice(r);

        // Test partial ordering in both directions.
        let compare = l.partial_cmp(&r);
        assert_eq!(compare, o, "Invalid comparison\n l: {:?}\n r: {:?}", l, r);
        let alt_compare = r.partial_cmp(&l);
        assert_eq!(alt_compare, o.map(Ordering::reverse), "Invalid alt comparison\n l: {:?}\n r: {:?}", l, r);

        // Test operators with faster implementations.
        assert_eq!(
            matches!(compare, Some(Ordering::Less)), l < r,
            "Invalid (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)), l <= r,
            "Invalid (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater)), l > r,
            "Invalid (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)), l >= r,
            "Invalid (>=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less)), r < l,
            "Invalid alt (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)), r <= l,
            "Invalid alt (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater)), r > l,
            "Invalid alt (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)), r >= l,
            "Invalid alt (>=):\n l: {:?}\n r: {:?}", l, r
        );
    }
}