//! Implementation of a data-race detector using Lamport timestamps /
//! vector clocks, based on "Dynamic Race Detection for C++11":
//! - https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf
//! which extends data-race detection to work correctly with fences.
//!
//! This does not explore weak memory orders and so can still miss data-races,
//! but it should not report false positives.
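//!
//! As a rough sketch of the vector-clock core (illustrative only, using the
//! `VClock` type defined at the bottom of this file):
//!
//! ```rust,ignore
//! let mut a = VClock::default();
//! let mut b = VClock::default();
//! a.increment_index(0);                 // a = [1]: one step on thread 0
//! b.increment_index(1);                 // b = [0, 1]: one step on thread 1
//! assert_eq!(a.partial_cmp(&b), None);  // concurrent: neither contains the other
//! b.join(&a);                           // b = [1, 1]: thread 1 synchronized with thread 0
//! assert!(a <= b);                      // a now happens-before b
//! ```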
use std::{
    cell::{Cell, Ref, RefCell, RefMut},
    cmp::Ordering,
    fmt::{self, Debug},
    ops::Index,
    rc::Rc,
};

use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_target::abi::Size;
use smallvec::SmallVec;

use crate::*;

pub type AllocExtra = VClockAlloc;
pub type MemoryExtra = Rc<GlobalState>;
/// Valid atomic read-write operations: an alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRWOp {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

/// Valid atomic read operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
    Relaxed,
    Acquire,
    SeqCst,
}

/// Valid atomic write operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
    Relaxed,
    Release,
    SeqCst,
}

/// Valid atomic fence operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {

    /// Variant of `read_immediate` that does not perform data-race checks.
    fn read_immediate_racy(&self, op: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
        let this = self.eval_context_ref();
        let data_race = &*this.memory.extra.data_race;
        let old = data_race.multi_threaded.get();

        // Temporarily disable data-race detection around the inner read.
        data_race.multi_threaded.set(false);
        let res = this.read_immediate(op.into());

        data_race.multi_threaded.set(old);
        res
    }
    /// Variant of `write_immediate` that does not perform data-race checks.
    fn write_immediate_racy(
        &mut self, src: Immediate<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        let old = data_race.multi_threaded.get();

        // Temporarily disable data-race detection around the inner write.
        data_race.multi_threaded.set(false);
        let imm = this.write_immediate(src, dest.into());

        let data_race = &*this.memory.extra.data_race;
        data_race.multi_threaded.set(old);
        imm
    }
    /// Variant of `read_scalar` that does not perform data-race checks.
    fn read_scalar_racy(
        &self, op: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        Ok(self.read_immediate_racy(op)?.to_scalar_or_uninit())
    }

    /// Variant of `write_scalar` that does not perform data-race checks.
    fn write_scalar_racy(
        &mut self, val: ScalarMaybeUninit<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        self.write_immediate_racy(Immediate::Scalar(val.into()), dest)
    }
    /// Variant of the `read_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn read_scalar_at_offset_racy(
        &self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following read at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.read_scalar_racy(value_place.into())
    }

    /// Variant of the `write_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn write_scalar_at_offset_racy(
        &mut self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        value: impl Into<ScalarMaybeUninit<Tag>>,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following write at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.write_scalar_racy(value.into(), value_place.into())
    }
    /// Load the data-race allocation state for a given memory place;
    /// also returns the size and the offset of the result in the allocation.
    fn load_data_race_state<'a>(
        &'a mut self, place: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, (&'a mut VClockAlloc, Size, Size)> where 'mir: 'a {
        let this = self.eval_context_mut();
        let ptr = place.ptr.assert_ptr();
        let size = place.layout.size;
        let data_race = &mut this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race;
        Ok((data_race, size, ptr.offset))
    }
    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_load(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicReadOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (alloc, size, offset) = this.load_data_race_state(place)?;
            log::trace!(
                "Atomic load on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                alloc.global.current_thread(), atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let mut current_state = alloc.global.current_thread_state_mut();
            if atomic == AtomicReadOp::Relaxed {
                // Perform relaxed atomic load.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.load_relaxed(&mut *current_state);
                }
            } else {
                // Perform acquire (or seq-cst) atomic load.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.acquire(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_store(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicWriteOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (alloc, size, offset) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic store on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            if atomic == AtomicWriteOp::Relaxed {
                // Perform relaxed atomic store.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.store_relaxed(&mut *current_state, current_thread);
                }
            } else {
                // Perform release (or seq-cst) atomic store.
                for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.release(&mut *current_state, current_thread);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic read-modify-write occurring
    /// at the associated memory place and on the current thread.
    fn validate_atomic_rmw(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicRWOp
    ) -> InterpResult<'tcx> {
        use AtomicRWOp::*;
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (alloc, size, offset) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic RMW on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
            let release = matches!(atomic, Release | AcqRel | SeqCst);
            for range in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                // FIXME: this is probably still slightly wrong due to the quirks
                // in the C++11 memory model.
                if acquire {
                    // Atomic RW-op acquire.
                    range.acquire(&mut *current_state);
                } else {
                    range.load_relaxed(&mut *current_state);
                }
                if release {
                    // Atomic RW-op release.
                    range.rmw_release(&mut *current_state, current_thread);
                } else {
                    range.rmw_relaxed(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for range in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic fence on the current thread.
    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            log::trace!("Atomic fence on {:?} with ordering {:?}", data_race.current_thread(), atomic);
            // Apply data-race detection for the current fences;
            // this treats AcqRel and SeqCst as the same as an acquire
            // and a release fence applied in the same timestamp.
            if atomic != AtomicFenceOp::Release {
                // Either Acquire | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_acquire_fence();
            }
            if atomic != AtomicFenceOp::Acquire {
                // Either Release | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_release_fence();
            }

            data_race.advance_vector_clock();
        }
        Ok(())
    }
}
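
// For intuition, this is the fence synchronization pattern the rules above are
// meant to model (a standalone user-program sketch, not part of Miri itself):
//
//     use std::sync::atomic::{fence, AtomicUsize, Ordering};
//     static DATA: AtomicUsize = AtomicUsize::new(0);
//     static FLAG: AtomicUsize = AtomicUsize::new(0);
//
//     // Thread 1:                        // Thread 2:
//     DATA.store(1, Ordering::Relaxed);   while FLAG.load(Ordering::Relaxed) == 0 {}
//     fence(Ordering::Release);           fence(Ordering::Acquire);
//     FLAG.store(1, Ordering::Relaxed);   assert_eq!(DATA.load(Ordering::Relaxed), 1);
//
// The release fence makes the relaxed FLAG store carry thread 1's clock, and
// the acquire fence joins it into thread 2's clock, so the accesses to DATA
// are ordered and not reported as a race.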
/// Handle for locks to express their
/// acquire-release semantics.
#[derive(Clone, Debug, Default)]
pub struct DataRaceLockHandle {

    /// Internal acquire-release clock
    /// to express the acquire-release sync
    /// found in concurrency primitives.
    clock: VClock,
}
impl DataRaceLockHandle {
    pub fn set_values(&mut self, other: &Self) {
        self.clock.set_values(&other.clock)
    }
    pub fn reset(&mut self) {
        self.clock.set_zero_vector();
    }
}
/// Avoid a heap allocation for the common
/// case with atomic operations where the number
/// of active release sequences is small.
#[derive(Clone, PartialEq, Eq)]
enum AtomicReleaseSequences {

    /// Contains one or no values;
    /// if empty: (None, reset vector clock),
    /// if one: (Some(thread), thread_clock).
    ReleaseOneOrEmpty(Option<ThreadId>, VClock),

    /// Contains two or more values
    /// stored in a hash-map of thread id to
    /// associated vector clock.
    ReleaseMany(FxHashMap<ThreadId, VClock>)
}
impl AtomicReleaseSequences {

    /// Return an empty set of atomic release sequences.
    #[inline]
    fn new() -> AtomicReleaseSequences {
        Self::ReleaseOneOrEmpty(None, VClock::default())
    }
    /// Remove all values except for the value stored at `thread` and set
    /// the vector clock to the associated `clock` value.
    #[inline]
    fn clear_and_set(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                *id = Some(thread);
                rel_clock.set_values(clock);
            }
            Self::ReleaseMany(_) => {
                *self = Self::ReleaseOneOrEmpty(Some(thread), clock.clone());
            }
        }
    }

    /// Remove all values except for the value stored at `thread`.
    #[inline]
    fn clear_and_retain(&mut self, thread: ThreadId) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                // If the id is the same, then retain the value,
                // otherwise delete and clear the release vector clock.
                if *id != Some(thread) {
                    *id = None;
                    rel_clock.set_zero_vector();
                }
            }
            Self::ReleaseMany(hash_map) => {
                // Retain only the thread element, so reduce to size
                // of 1 or 0, and move to the smaller format.
                if let Some(clock) = hash_map.remove(&thread) {
                    *self = Self::ReleaseOneOrEmpty(Some(thread), clock);
                } else {
                    *self = Self::ReleaseOneOrEmpty(None, VClock::default());
                }
            }
        }
    }
    /// Insert a release sequence at `thread` with values `clock`.
    fn insert(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                if id.map_or(true, |id| id == thread) {
                    *id = Some(thread);
                    rel_clock.set_values(clock);
                } else {
                    let mut hash_map = FxHashMap::default();
                    hash_map.insert(thread, clock.clone());
                    hash_map.insert(id.unwrap(), rel_clock.clone());
                    *self = Self::ReleaseMany(hash_map);
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.insert(thread, clock.clone());
            }
        }
    }

    /// Return the release sequence at `thread` if one exists.
    #[inline]
    fn load(&self, thread: ThreadId) -> Option<&VClock> {
        match self {
            Self::ReleaseOneOrEmpty(id, clock) => {
                if *id == Some(thread) {
                    Some(clock)
                } else {
                    None
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.get(&thread)
            }
        }
    }
}
/// Custom debug implementation to correctly
/// print debug as a logical mapping from threads
/// to vector clocks.
impl Debug for AtomicReleaseSequences {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::ReleaseOneOrEmpty(None, _) => {
                f.debug_map().finish()
            }
            Self::ReleaseOneOrEmpty(Some(id), clock) => {
                f.debug_map().entry(&id, &clock).finish()
            }
            Self::ReleaseMany(hash_map) => {
                Debug::fmt(hash_map, f)
            }
        }
    }
}
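
// Illustration of the size optimization above (a sketch; `seqs` and the thread
// ids are hypothetical, but the methods are the ones defined in this impl):
//
//     let mut seqs = AtomicReleaseSequences::new();
//     let clock = VClock::default();
//     seqs.insert(ThreadId::new(0), &clock);   // stays in ReleaseOneOrEmpty
//     seqs.insert(ThreadId::new(1), &clock);   // spills to ReleaseMany
//     seqs.clear_and_retain(ThreadId::new(1)); // shrinks back to ReleaseOneOrEmpty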
/// Externally stored memory cell clocks,
/// explicitly boxed to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {

    /// Synchronization vector for acquire-release semantics.
    sync_vector: VClock,

    /// The Hash-Map of all threads for which a release
    /// sequence exists in the memory cell.
    release_sequences: AtomicReleaseSequences,
}

/// Memory cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {

    /// The timestamp of the last write
    /// to this memory location.
    write: Timestamp,

    /// The id of the thread that performed the last write to this memory location.
    write_thread: ThreadId,

    /// The vector-clock of the set of previous reads;
    /// each index is set to the timestamp that the associated
    /// thread last read this value.
    read: VClock,

    /// Atomic acquire & release sequence tracking clocks;
    /// for non-atomic memory in the common case this
    /// value is set to None.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

/// Create a default memory cell clocks instance
/// for uninitialized memory.
impl Default for MemoryCellClocks {
    fn default() -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: 0,
            write_thread: ThreadId::new(u32::MAX as usize),
            atomic_ops: None,
        }
    }
}

impl MemoryCellClocks {
    /// Load the internal atomic memory cells if they exist.
    #[inline]
    fn atomic(&mut self) -> Option<&AtomicMemoryCellClocks> {
        match &self.atomic_ops {
            Some(op) => Some(&*op),
            None => None
        }
    }

    /// Load or create the internal atomic memory metadata
    /// if it does not exist.
    #[inline]
    fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.get_or_insert_with(|| {
            Box::new(AtomicMemoryCellClocks {
                sync_vector: VClock::default(),
                release_sequences: AtomicReleaseSequences::new()
            })
        })
    }

    /// Update memory cell data-race tracking for atomic
    /// load acquire semantics; this is a no-op if the memory
    /// was not used previously as atomic memory.
    fn acquire(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.clock.join(&atomic.sync_vector);
        }
    }

    /// Update memory cell data-race tracking for atomic
    /// load relaxed semantics; this is a no-op if the memory
    /// was not used previously as atomic memory.
    fn load_relaxed(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.fence_acquire.join(&atomic.sync_vector);
        }
    }
    /// Update the memory cell data-race tracking for atomic
    /// store release semantics.
    fn release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.clock);
        atomic.release_sequences.clear_and_set(thread, &clocks.clock);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics.
    fn store_relaxed(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.fence_release);
        if let Some(release) = atomic.release_sequences.load(thread) {
            atomic.sync_vector.join(release);
        }
        atomic.release_sequences.clear_and_retain(thread);
    }
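
    // Worked example of the release-sequence rule in `store_relaxed` (sketch,
    // with illustrative clock values): suppose thread 0 release-stores with
    // clock [3] and then relaxed-stores to the same cell. The release store
    // sets sync_vector = [3] and records thread 0's release sequence; the
    // relaxed store resets sync_vector to the fence-release clock but then
    // joins thread 0's retained release sequence back in, so an acquire-load
    // of the second value still synchronizes with the original release store.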
    /// Update the memory cell data-race tracking for atomic
    /// store release semantics for RMW operations.
    fn rmw_release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.clock);
        atomic.release_sequences.insert(thread, &clocks.clock);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics for RMW operations.
    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.fence_release);
    }
    /// Detect races for non-atomic read operations at the current memory cell;
    /// returns true if a data-race is detected.
    fn read_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        // The read is ordered after the last write iff the write's timestamp
        // is contained in this thread's clock.
        if self.write <= clocks.clock[self.write_thread] {
            self.read.set_at_thread(&clocks.clock, thread);
            false
        } else {
            true
        }
    }

    /// Detect races for non-atomic write operations at the current memory cell;
    /// returns true if a data-race is detected.
    fn write_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        // A write must be ordered after the last write and after all reads.
        if self.write <= clocks.clock[self.write_thread] && self.read <= clocks.clock {
            self.write = clocks.clock[thread];
            self.write_thread = thread;
            self.read.set_zero_vector();
            false
        } else {
            true
        }
    }
}
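
// Numeric sketch of the checks above (illustrative values only): if thread 1
// last wrote this cell at timestamp 3 and the current thread's clock is
// [5, 2], then 3 > 2, so the write is not ordered before the current access
// and a race is reported; had the threads synchronized so the clock was
// [5, 3] or later, the access would be allowed.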
/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {

    /// Range of vector clocks, mapping each byte range
    /// in this allocation to its memory-cell clocks.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

    /// Pointer to global state.
    global: MemoryExtra,
}

impl VClockAlloc {

    /// Create a new data-race allocation detector.
    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
        VClockAlloc {
            global: Rc::clone(global),
            alloc_ranges: RefCell::new(
                RangeMap::new(len, MemoryCellClocks::default())
            )
        }
    }
    /// Report a data-race found in the program:
    /// this finds the two racing threads and the type
    /// of data-race that occurred; this will also
    /// return info about the memory location the data-race
    /// occurred in.
    #[cold]
    #[inline(never)]
    fn report_data_race<'tcx>(
        global: &MemoryExtra, range: &MemoryCellClocks, action: &str,
        pointer: Pointer<Tag>, len: Size
    ) -> InterpResult<'tcx> {
        let current_thread = global.current_thread();
        let current_state = global.current_thread_state();
        let mut write_clock = VClock::default();
        let (
            other_action, other_thread, other_clock
        ) = if range.write > current_state.clock[range.write_thread] {

            // Create the effective write-clock that the data-race occurred with.
            let wclock = write_clock.get_mut_with_min_len(
                current_state.clock.as_slice().len()
                    .max(range.write_thread.to_u32() as usize + 1)
            );
            wclock[range.write_thread.to_u32() as usize] = range.write;
            ("WRITE", range.write_thread, write_clock.as_slice())
        } else {

            // Find the index in the read-clock that the data-race occurred with.
            let read_slice = range.read.as_slice();
            let clock_slice = current_state.clock.as_slice();
            let conflicting_index = read_slice.iter()
                .zip(clock_slice.iter())
                .enumerate().find_map(|(idx, (&read, &clock))| {
                    if read > clock {
                        Some(idx)
                    } else {
                        None
                    }
                }).unwrap_or_else(|| {
                    assert!(read_slice.len() > clock_slice.len(), "BUG: cannot find read race yet reported data-race");
                    let rest_read = &read_slice[clock_slice.len()..];
                    rest_read.iter().enumerate().find_map(|(idx, &val)| {
                        if val > 0 {
                            Some(idx + clock_slice.len())
                        } else {
                            None
                        }
                    }).expect("Invariant broken for read-slice, no 0 element at the tail")
                });
            ("READ", ThreadId::new(conflicting_index), range.read.as_slice())
        };

        let current_thread_info = global.print_thread_metadata(current_thread);
        let other_thread_info = global.print_thread_metadata(other_thread);

        // Throw the data-race detection error.
        throw_ub_format!(
            "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
            \n\t\t -current vector clock = {:?}\
            \n\t\t -conflicting timestamp = {:?}",
            action, current_thread_info,
            other_action, other_thread_info,
            pointer.alloc_id, pointer.offset.bytes(), len.bytes(),
            current_state.clock,
            other_clock
        )
    }
    /// Detect data-races for an unsynchronized read operation. This will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation.
    pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();

            // The alloc-ranges are not split, however changes are not going to be made
            // to the ranges being tested, so this is ok.
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
            for range in alloc_ranges.iter_mut(pointer.offset, len) {
                if range.read_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "READ", pointer, len
                    );
                }
            }
        }
        Ok(())
    }

    /// Detect data-races for an unsynchronized write operation. This will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation.
    pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for range in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "WRITE", pointer, len
                    );
                }
            }
        }
        Ok(())
    }

    /// Detect data-races for an unsynchronized deallocate operation. This will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write
    /// operation.
    pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for range in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "DEALLOCATE", pointer, len
                    );
                }
            }
        }
        Ok(())
    }
}
/// The current set of vector clocks describing the state
/// of a thread: contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
struct ThreadClockSet {

    /// The increasing clock representing timestamps
    /// that happen-before this thread.
    clock: VClock,

    /// The set of timestamps that will happen-before this
    /// thread once it performs an acquire fence.
    fence_acquire: VClock,

    /// The last timestamp of happens-before relations that
    /// have been released by this thread by a fence.
    fence_release: VClock,
}

impl ThreadClockSet {

    /// Apply the effects of a release fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.set_values(&self.clock);
    }

    /// Apply the effects of an acquire fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a
    /// known thread.
    #[inline]
    fn increment_clock(&mut self, thread: ThreadId) {
        self.clock.increment_thread(thread);
    }

    /// Join the happens-before clock with that of
    /// another thread, used to model thread join
    /// operations.
    #[inline]
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
/// Global data-race detection state: contains the currently
/// executing thread as well as the vector clocks associated
/// with each of the threads.
#[derive(Debug, Clone)]
pub struct GlobalState {

    /// Set to true once the first additional
    /// thread has launched. Because of the happens-before
    /// dependency between the events before and after a thread
    /// launch, any data-race can only occur after this point,
    /// so purely sequential execution can skip recording entirely.
    multi_threaded: Cell<bool>,

    /// The current vector clock for all threads;
    /// this includes threads that have terminated.
    thread_clocks: RefCell<IndexVec<ThreadId, ThreadClockSet>>,

    /// Thread name cache for better diagnostics when reporting
    /// detected data races.
    thread_names: RefCell<IndexVec<ThreadId, Option<Box<str>>>>,

    /// The current thread being executed;
    /// this is mirrored from the scheduler since
    /// it is required for loading the current vector
    /// clock for data-race detection.
    current_thread_id: Cell<ThreadId>,
}

impl GlobalState {
    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new() -> Self {
        let mut vec = IndexVec::new();
        let thread_id = vec.push(ThreadClockSet::default());
        vec[thread_id].increment_clock(thread_id);
        GlobalState {
            multi_threaded: Cell::new(false),
            thread_clocks: RefCell::new(vec),
            thread_names: RefCell::new(IndexVec::new()),
            current_thread_id: Cell::new(thread_id),
        }
    }
    /// Hook for thread creation: enables multi-threaded execution and marks
    /// the current thread timestamp as happening-before the created thread.
    #[inline]
    pub fn thread_created(&self, thread: ThreadId) {

        // Enable multi-threaded execution mode now that there are at least
        // two threads.
        self.multi_threaded.set(true);
        let current_thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors.ensure_contains_elem(thread, Default::default);
        let (current, created) = vectors.pick2_mut(current_thread, thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);

        // The current thread happens-before the created thread,
        // so update the created vector clock.
        created.join_with(current);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        created.increment_clock(thread);
    }
    /// Hook on a thread join to update the implicit happens-before relation
    /// between the joined thread and the current thread.
    #[inline]
    pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
        let mut vectors = self.thread_clocks.borrow_mut();
        let (current, join) = vectors.pick2_mut(current_thread, join_thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);

        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        current.join_with(join);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);
    }
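
    // Worked example with illustrative values: if the joining thread's clock
    // is [3, 0] and the joined thread terminated with clock [1, 5], the join
    // above yields [3, 5] (element-wise maximum, before the surrounding
    // increments), so every event of the joined thread now happens-before
    // all subsequent events of the joining thread.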
    /// Hook for updating the local tracker of the currently
    /// enabled thread; should always be updated whenever
    /// `active_thread` in thread.rs is updated.
    #[inline]
    pub fn thread_set_active(&self, thread: ThreadId) {
        self.current_thread_id.set(thread);
    }

    /// Hook for updating the local tracker of the thread's name;
    /// this should always mirror the local value in thread.rs.
    /// The thread name is used for improved diagnostics
    /// during a data-race.
    #[inline]
    pub fn thread_set_name(&self, name: String) {
        let name = name.into_boxed_str();
        let mut names = self.thread_names.borrow_mut();
        let thread = self.current_thread_id.get();
        names.ensure_contains_elem(thread, Default::default);
        names[thread] = Some(name);
    }

    /// Advance the vector clock for a thread;
    /// this is called before and after any atomic/synchronizing operations
    /// that may manipulate state.
    #[inline]
    fn advance_vector_clock(&self) {
        let thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors[thread].increment_clock(thread);

        // Log the increment in the atomic vector clock.
        log::trace!("Atomic vector clock increase for {:?} to {:?}", thread, vectors[thread].clock);
    }
    /// Internal utility to identify a thread stored internally;
    /// returns the id and the name for better diagnostics.
    fn print_thread_metadata(&self, thread: ThreadId) -> String {
        if let Some(Some(name)) = self.thread_names.borrow().get(thread) {
            let name: &str = name;
            format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
        } else {
            format!("Thread(id = {:?})", thread.to_u32())
        }
    }
    /// Acquire a lock, expressing that the previous call of
    /// `validate_lock_release` must happen-before this one.
    pub fn validate_lock_acquire(&self, lock: &DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &mut ref_vector[thread];
        clocks.clock.join(&lock.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, expressing that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    pub fn validate_lock_release(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.set_values(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, expressing that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    pub fn validate_lock_release_shared(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.join(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }
    /// Load the thread clock set associated with the current thread.
    #[inline]
    fn current_thread_state(&self) -> Ref<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow();
        let thread = self.current_thread_id.get();
        Ref::map(ref_vector, |vector| &vector[thread])
    }

    /// Load the thread clock set associated with the current thread
    /// mutably, for modification.
    #[inline]
    fn current_thread_state_mut(&self) -> RefMut<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow_mut();
        let thread = self.current_thread_id.get();
        RefMut::map(ref_vector, |vector| &mut vector[thread])
    }

    /// Return the current thread; should be the same
    /// as the data-race active thread.
    #[inline]
    fn current_thread(&self) -> ThreadId {
        self.current_thread_id.get()
    }
}
/// The maximum size of a vector clock to store inline;
/// clock vectors larger than this will be stored on the heap.
const SMALL_VECTOR: usize = 4;

/// The type of the timestamps recorded in the data-race detector,
/// set to a type of unsigned integer.
type Timestamp = u32;

/// A vector clock for detecting data-races.
///
/// Invariant: the last element in a VClock must not be 0:
/// - this means that derive(PartialEq & Eq) is correct,
///   as there is no implicit zero tail that might be equal;
/// - it also simplifies the implementation of PartialOrd.
#[derive(Clone, PartialEq, Eq, Default, Debug)]
pub struct VClock(SmallVec<[Timestamp; SMALL_VECTOR]>);

impl VClock {
    /// Load the backing slice behind the clock vector.
    #[inline]
    fn as_slice(&self) -> &[Timestamp] {
        self.0.as_slice()
    }

    /// Get a mutable slice to the internal vector with minimum `min_len`
    /// elements; to preserve the invariant, the caller must ensure
    /// the element at index `min_len - 1` ends up non-zero.
    #[inline]
    fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [Timestamp] {
        if self.0.len() < min_len {
            self.0.resize(min_len, 0);
        }
        assert!(self.0.len() >= min_len);
        self.0.as_mut_slice()
    }

    /// Increment the vector clock at a known index.
    #[inline]
    fn increment_index(&mut self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let idx_ref = &mut mut_slice[idx];
        *idx_ref = idx_ref.checked_add(1).expect("Vector clock overflow")
    }
    /// Increment the vector element representing the progress
    /// of execution in the given thread.
    #[inline]
    pub fn increment_thread(&mut self, thread: ThreadId) {
        self.increment_index(thread.to_u32() as usize);
    }

    /// Join the two vector clocks together; this
    /// sets each vector element to the maximum value
    /// of that element in either of the two source clocks.
    pub fn join(&mut self, other: &Self) {
        let rhs_slice = other.as_slice();
        let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());

        // Element-wise set to maximum.
        for (l, &r) in lhs_slice.iter_mut().zip(rhs_slice.iter()) {
            *l = r.max(*l);
        }
    }
    /// Set the element at `idx` to the value of the
    /// corresponding element in another clock.
    fn set_at_index(&mut self, other: &Self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let slice = other.as_slice();
        mut_slice[idx] = slice[idx];
    }

    /// Set the element for `thread` to the value of the
    /// corresponding element in another thread's vector clock.
    pub fn set_at_thread(&mut self, other: &Self, thread: ThreadId) {
        self.set_at_index(other, thread.to_u32() as usize);
    }

    /// Clear the vector to all zeros, stored as an empty internal
    /// vector.
    pub fn set_zero_vector(&mut self) {
        self.0.clear();
    }

    /// Set the values stored in this vector clock
    /// to the values stored in another.
    pub fn set_values(&mut self, new_value: &VClock) {
        let new_slice = new_value.as_slice();
        self.0.resize(new_slice.len(), 0);
        self.0.copy_from_slice(new_slice);
    }
}
impl PartialOrd for VClock {
    fn partial_cmp(&self, other: &VClock) -> Option<Ordering> {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // Iterate through the combined vector slice,
        // keeping track of the order that is currently possible to satisfy.
        // If an ordering relation is detected to be impossible, then bail and
        // directly return None.
        let mut iter = lhs_slice.iter().zip(rhs_slice.iter());
        let mut order = match iter.next() {
            Some((lhs, rhs)) => lhs.cmp(rhs),
            None => Ordering::Equal
        };
        for (l, r) in iter {
            match order {
                Ordering::Equal => order = l.cmp(r),
                Ordering::Less => if l > r {
                    return None
                },
                Ordering::Greater => if l < r {
                    return None
                }
            }
        }

        // Now test if either left or right have trailing elements:
        // by the invariant the trailing elements have at least one
        // non-zero value, so no additional calculation is required
        // to determine the result of the PartialOrder.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        match l_len.cmp(&r_len) {
            // Equal means no additional elements: return the current order.
            Ordering::Equal => Some(order),
            // Right has at least one element greater than the implicit 0,
            // so the only valid values are Ordering::Less or None.
            Ordering::Less => match order {
                Ordering::Less | Ordering::Equal => Some(Ordering::Less),
                Ordering::Greater => None
            }
            // Left has at least one element greater than the implicit 0,
            // so the only valid values are Ordering::Greater or None.
            Ordering::Greater => match order {
                Ordering::Greater | Ordering::Equal => Some(Ordering::Greater),
                Ordering::Less => None
            }
        }
    }
    fn lt(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element on the left is greater
        // than the implicit zero on the right, therefore the result is
        // either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {

            // If any elements on the left are greater than the right
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r; finally,
            // the case where the values are equal needs to be considered,
            // and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l > r {
                    return false
                } else if l < r {
                    equal = false;
                }
            }
            !equal
        } else {
            false
        }
    }

    fn le(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element on the left is greater
        // than the implicit zero on the right, therefore the result is
        // either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {

            // If any elements on the left are greater than the right
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l > r)
        } else {
            false
        }
    }
    fn gt(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element on the right is greater
        // than the implicit zero on the left, therefore the result is
        // either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {

            // If any elements on the left are less than the right
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r; finally,
            // the case where the values are equal needs to be considered,
            // and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l < r {
                    return false
                } else if l > r {
                    equal = false;
                }
            }
            !equal
        } else {
            false
        }
    }

    fn ge(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element on the right is greater
        // than the implicit zero on the left, therefore the result is
        // either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {

            // If any elements on the left are less than the right
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l < r)
        } else {
            false
        }
    }
}
impl Index<ThreadId> for VClock {
    type Output = Timestamp;

    #[inline]
    fn index(&self, index: ThreadId) -> &Timestamp {
        // Out-of-range indices read the implicit zero tail.
        self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
    }
}
/// Test vector clock ordering operations:
/// data-race detection is tested in the external
/// test suite.
#[cfg(test)]
mod tests {
    use super::{VClock, Timestamp};
    use std::cmp::Ordering;

    #[test]
    fn test_equal() {
        let mut c1 = VClock::default();
        let mut c2 = VClock::default();
        assert_eq!(c1, c2);
        c1.increment_index(5);
        assert_ne!(c1, c2);
        c2.increment_index(53);
        assert_ne!(c1, c2);
        c1.increment_index(53);
        assert_ne!(c1, c2);
        c2.increment_index(5);
        assert_eq!(c1, c2);
    }
    #[test]
    fn test_partial_order() {
        // Small test values.
        assert_order(&[1], &[1], Some(Ordering::Equal));
        assert_order(&[1], &[2], Some(Ordering::Less));
        assert_order(&[2], &[1], Some(Ordering::Greater));
        assert_order(&[1], &[1, 2], Some(Ordering::Less));
        assert_order(&[2], &[1, 2], None);

        // Concurrent clocks of distinct lengths.
        assert_order(&[400], &[0, 1], None);

        // Larger vectors with implicit zero tails.
        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Equal));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Greater));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], None);
        assert_order(&[0,1,2,3,4,5,6,7,8,9, 9], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9, 9], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
    }
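
    // An additional sketch test for the mutating helpers used throughout the
    // detector (`join`, `set_values`); the expected values follow directly
    // from the element-wise-maximum and copy semantics documented above.
    #[test]
    fn test_join_and_set_values() {
        let mut joined = from_slice(&[1, 0, 5]);
        joined.join(&from_slice(&[2, 0, 3, 7]));
        assert_eq!(joined.as_slice(), &[2, 0, 5, 7]);

        let mut copied = VClock::default();
        copied.set_values(&from_slice(&[0, 2]));
        assert_eq!(copied.as_slice(), &[0, 2]);
    }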
    fn from_slice(mut slice: &[Timestamp]) -> VClock {
        // Trim the implicit zero tail to uphold the VClock invariant.
        while let Some(0) = slice.last() {
            slice = &slice[..slice.len() - 1]
        }
        VClock(smallvec::SmallVec::from_slice(slice))
    }

    fn assert_order(l: &[Timestamp], r: &[Timestamp], o: Option<Ordering>) {
        let l = from_slice(l);
        let r = from_slice(r);

        // Test partial ordering in both directions.
        let compare = l.partial_cmp(&r);
        assert_eq!(compare, o, "Invalid comparison\n l: {:?}\n r: {:?}", l, r);
        let alt_compare = r.partial_cmp(&l);
        assert_eq!(alt_compare, o.map(Ordering::reverse), "Invalid alt comparison\n l: {:?}\n r: {:?}", l, r);
        // Test the operators with faster implementations.
        assert_eq!(
            matches!(compare, Some(Ordering::Less)), l < r,
            "Invalid (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)), l <= r,
            "Invalid (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater)), l > r,
            "Invalid (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)), l >= r,
            "Invalid (>=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less)), r < l,
            "Invalid alt (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)), r <= l,
            "Invalid alt (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater)), r > l,
            "Invalid alt (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)), r >= l,
            "Invalid alt (>=):\n l: {:?}\n r: {:?}", l, r
        );
    }
}