//! Implementation of a data-race detector using Lamport Timestamps / Vector clocks
//! based on "Dynamic Race Detection for C++11":
//! - https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf
//! which extends data-race detection to work correctly with fences.
//! This does not explore weak memory orders and so can still miss data-races,
//! but should not report false positives.
use std::{fmt::{self, Debug}, cmp::Ordering, rc::Rc, cell::{Cell, RefCell, Ref, RefMut}, ops::Index};

use rustc_index::vec::{Idx, IndexVec};
use rustc_target::abi::Size;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_data_structures::fx::FxHashMap;

use smallvec::SmallVec;

use crate::*;

pub type AllocExtra = VClockAlloc;
pub type MemoryExtra = Rc<GlobalState>;
/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicRWOp {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
/// Valid atomic read operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicReadOp {
    Relaxed,
    Acquire,
    SeqCst,
}
/// Valid atomic write operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicWriteOp {
    Relaxed,
    Release,
    SeqCst,
}
/// Valid atomic fence operations, subset of atomic::Ordering.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AtomicFenceOp {
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {

    /// Variant of `read_immediate` that does not perform data-race checks.
    fn read_immediate_racy(&self, op: MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> {
        let this = self.eval_context_ref();
        let data_race = &*this.memory.extra.data_race;

        let old = data_race.multi_threaded.replace(false);
        let res = this.read_immediate(op.into());
        data_race.multi_threaded.set(old);

        res
    }
    /// Variant of `write_immediate` that does not perform data-race checks.
    fn write_immediate_racy(
        &mut self, src: Immediate<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        let old = data_race.multi_threaded.replace(false);

        let imm = this.write_immediate(src, dest.into());

        let data_race = &*this.memory.extra.data_race;
        data_race.multi_threaded.set(old);

        imm
    }
    /// Variant of `read_scalar` that does not perform data-race checks.
    fn read_scalar_racy(
        &self, op: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        Ok(self.read_immediate_racy(op)?.to_scalar_or_uninit())
    }

    /// Variant of `write_scalar` that does not perform data-race checks.
    fn write_scalar_racy(
        &mut self, val: ScalarMaybeUninit<Tag>, dest: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx> {
        self.write_immediate_racy(Immediate::Scalar(val.into()), dest)
    }
    /// Variant of the `read_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn read_scalar_at_offset_racy(
        &self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let this = self.eval_context_ref();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following read at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.read_scalar_racy(value_place.into())
    }
    /// Variant of the `write_scalar_at_offset` helper function that does not
    /// perform data-race checks.
    fn write_scalar_at_offset_racy(
        &mut self,
        op: OpTy<'tcx, Tag>,
        offset: u64,
        value: impl Into<ScalarMaybeUninit<Tag>>,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ()> {
        let this = self.eval_context_mut();
        let op_place = this.deref_operand(op)?;
        let offset = Size::from_bytes(offset);
        // Ensure that the following write at an offset is within bounds.
        assert!(op_place.layout.size >= offset + layout.size);
        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
        this.write_scalar_racy(value.into(), value_place.into())
    }
    /// Load the data-race allocation state for a given memory place;
    /// also returns the size and the offset of the result in the allocation.
    fn load_data_race_state<'a>(
        &'a mut self, place: MPlaceTy<'tcx, Tag>
    ) -> InterpResult<'tcx, (&'a mut VClockAlloc, Size, Size)> where 'mir: 'a {
        let this = self.eval_context_mut();

        let ptr = place.ptr.assert_ptr();
        let size = place.layout.size;
        let data_race = &mut this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race;

        Ok((data_race, size, ptr.offset))
    }
    /// Update the data-race detector for an atomic read occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_load(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicReadOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            log::trace!(
                "Atomic load on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                alloc.global.current_thread(), atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let mut current_state = alloc.global.current_thread_state_mut();
            if atomic == AtomicReadOp::Relaxed {
                // Perform relaxed atomic load.
                for (_, range) in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.load_relaxed(&mut *current_state);
                }
            } else {
                // Perform acquire (or seq-cst) atomic load.
                for (_, range) in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.acquire(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for (_, range) in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic write occurring at the
    /// associated memory-place and on the current thread.
    fn validate_atomic_store(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicWriteOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic store on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            if atomic == AtomicWriteOp::Relaxed {
                // Perform relaxed atomic store.
                for (_, range) in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.store_relaxed(&mut *current_state, current_thread);
                }
            } else {
                // Perform release (or seq-cst) atomic store.
                for (_, range) in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                    range.release(&mut *current_state, current_thread);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for (_, range) in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic read-modify-write occurring
    /// at the associated memory-place and on the current thread.
    fn validate_atomic_rmw(
        &mut self, place: MPlaceTy<'tcx, Tag>, atomic: AtomicRWOp
    ) -> InterpResult<'tcx> {
        use AtomicRWOp::*;
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            let (
                alloc, size, offset
            ) = this.load_data_race_state(place)?;
            let current_thread = alloc.global.current_thread();
            let mut current_state = alloc.global.current_thread_state_mut();
            log::trace!(
                "Atomic RMW on {:?} with ordering {:?}, in memory({:?}, offset={}, size={})",
                current_thread, atomic,
                place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes()
            );

            let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
            let release = matches!(atomic, Release | AcqRel | SeqCst);
            for (_, range) in alloc.alloc_ranges.get_mut().iter_mut(offset, size) {
                // FIXME: this is probably still slightly wrong due to the quirks
                //  in the C++11 memory model.
                if acquire {
                    // Atomic RW-op acquire.
                    range.acquire(&mut *current_state);
                } else {
                    range.load_relaxed(&mut *current_state);
                }
                if release {
                    // Atomic RW-op release.
                    range.rmw_release(&mut *current_state, current_thread);
                } else {
                    range.rmw_relaxed(&mut *current_state);
                }
            }

            // Log changes to atomic memory.
            if log::log_enabled!(log::Level::Trace) {
                for (_, range) in alloc.alloc_ranges.get_mut().iter(offset, size) {
                    log::trace!(
                        "  updated atomic memory({:?}, offset={}, size={}) to {:#?}",
                        place.ptr.assert_ptr().alloc_id, offset.bytes(), size.bytes(),
                        range.atomic_ops
                    );
                }
            }

            std::mem::drop(current_state);
            let data_race = &*this.memory.extra.data_race;
            data_race.advance_vector_clock();
        }
        Ok(())
    }
    /// Update the data-race detector for an atomic fence on the current thread.
    fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let data_race = &*this.memory.extra.data_race;
        if data_race.multi_threaded.get() {
            data_race.advance_vector_clock();

            log::trace!("Atomic fence on {:?} with ordering {:?}", data_race.current_thread(), atomic);
            // Apply data-race detection for the current fences;
            // this treats AcqRel and SeqCst as the same as an acquire
            // and a release fence applied in the same timestamp.
            if atomic != AtomicFenceOp::Release {
                // Either Acquire | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_acquire_fence();
            }
            if atomic != AtomicFenceOp::Acquire {
                // Either Release | AcqRel | SeqCst.
                data_race.current_thread_state_mut().apply_release_fence();
            }

            data_race.advance_vector_clock();
        }
        Ok(())
    }
}
/// Handle for locks to express their
/// acquire-release semantics.
#[derive(Clone, Debug, Default)]
pub struct DataRaceLockHandle {

    /// Internal acquire-release clock
    /// to express the acquire-release sync
    /// found in concurrency primitives.
    clock: VClock,
}
impl DataRaceLockHandle {
    pub fn set_values(&mut self, other: &Self) {
        self.clock.set_values(&other.clock)
    }
    pub fn reset(&mut self) {
        self.clock.set_zero_vector();
    }
}
/// Avoid an atomic allocation for the common
/// case with atomic operations where the number
/// of active release sequences is small.
#[derive(Clone, PartialEq, Eq)]
enum AtomicReleaseSequences {

    /// Contains one or no values:
    /// if empty: (None, reset vector clock)
    /// if one:   (Some(thread), thread_clock)
    ReleaseOneOrEmpty(Option<ThreadId>, VClock),

    /// Contains two or more values
    /// stored in a hash-map of thread id to
    /// the associated release-sequence clock.
    ReleaseMany(FxHashMap<ThreadId, VClock>),
}
impl AtomicReleaseSequences {

    /// Return an empty set of atomic release sequences.
    #[inline]
    fn new() -> AtomicReleaseSequences {
        Self::ReleaseOneOrEmpty(None, VClock::default())
    }

    /// Remove all values except for the value stored at `thread` and set
    /// the vector clock to the associated `clock` value.
    #[inline]
    fn clear_and_set(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                *id = Some(thread);
                rel_clock.set_values(clock);
            }
            Self::ReleaseMany(_) => {
                *self = Self::ReleaseOneOrEmpty(Some(thread), clock.clone());
            }
        }
    }

    /// Remove all values except for the value stored at `thread`.
    #[inline]
    fn clear_and_retain(&mut self, thread: ThreadId) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                // If the id is the same, then retain the value,
                // otherwise delete and clear the release vector clock.
                if *id != Some(thread) {
                    *id = None;
                    rel_clock.set_zero_vector();
                }
            }
            Self::ReleaseMany(hash_map) => {
                // Retain only the thread element, so reduce to size
                // of 1 or 0, and move to the smaller format.
                if let Some(clock) = hash_map.remove(&thread) {
                    *self = Self::ReleaseOneOrEmpty(Some(thread), clock);
                } else {
                    *self = Self::ReleaseOneOrEmpty(None, VClock::default());
                }
            }
        }
    }

    /// Insert a release sequence at `thread` with values `clock`.
    fn insert(&mut self, thread: ThreadId, clock: &VClock) {
        match self {
            Self::ReleaseOneOrEmpty(id, rel_clock) => {
                if id.map_or(true, |id| id == thread) {
                    *id = Some(thread);
                    rel_clock.set_values(clock);
                } else {
                    let mut hash_map = FxHashMap::default();
                    hash_map.insert(thread, clock.clone());
                    hash_map.insert(id.unwrap(), rel_clock.clone());
                    *self = Self::ReleaseMany(hash_map);
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.insert(thread, clock.clone());
            }
        }
    }

    /// Return the release sequence at `thread` if one exists.
    #[inline]
    fn load(&self, thread: ThreadId) -> Option<&VClock> {
        match self {
            Self::ReleaseOneOrEmpty(id, clock) => {
                if *id == Some(thread) {
                    Some(clock)
                } else {
                    None
                }
            }
            Self::ReleaseMany(hash_map) => {
                hash_map.get(&thread)
            }
        }
    }
}
/// Custom debug implementation to correctly
/// print debug as a logical mapping from threads
/// to vector clocks.
impl Debug for AtomicReleaseSequences {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::ReleaseOneOrEmpty(None, _) => {
                f.debug_map().finish()
            }
            Self::ReleaseOneOrEmpty(Some(id), clock) => {
                f.debug_map().entry(&id, &clock).finish()
            }
            Self::ReleaseMany(hash_map) => {
                Debug::fmt(hash_map, f)
            }
        }
    }
}
/// Externally stored memory cell clocks,
/// explicitly to reduce memory usage for the
/// common case where no atomic operations
/// exist on the memory cell.
#[derive(Clone, PartialEq, Eq, Debug)]
struct AtomicMemoryCellClocks {

    /// Synchronization vector for acquire-release semantics.
    sync_vector: VClock,

    /// The hash-map of all threads for which a release
    /// sequence exists in the memory cell.
    release_sequences: AtomicReleaseSequences,
}

/// Memory cell vector clock metadata
/// for data-race detection.
#[derive(Clone, PartialEq, Eq, Debug)]
struct MemoryCellClocks {

    /// The timestamp of the last write
    /// to this memory location.
    write: Timestamp,

    /// The id of the thread that performed the last write to this memory location.
    write_thread: ThreadId,

    /// The vector-clock of the set of previous reads;
    /// each index is set to the timestamp that the associated
    /// thread last read this value.
    read: VClock,

    /// Atomic acquire & release sequence tracking clocks;
    /// for non-atomic memory in the common case this
    /// value is set to None.
    atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
}

/// Create a default memory cell clocks instance
/// for uninitialized memory.
impl Default for MemoryCellClocks {
    fn default() -> Self {
        MemoryCellClocks {
            read: VClock::default(),
            write: 0,
            write_thread: ThreadId::new(u32::MAX as usize),
            atomic_ops: None,
        }
    }
}
impl MemoryCellClocks {

    /// Load the internal atomic memory cells if they exist.
    #[inline]
    fn atomic(&mut self) -> Option<&AtomicMemoryCellClocks> {
        match &self.atomic_ops {
            Some(op) => Some(&*op),
            None => None,
        }
    }

    /// Load or create the internal atomic memory metadata
    /// if it does not exist.
    #[inline]
    fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
        self.atomic_ops.get_or_insert_with(|| {
            Box::new(AtomicMemoryCellClocks {
                sync_vector: VClock::default(),
                release_sequences: AtomicReleaseSequences::new(),
            })
        })
    }

    /// Update memory cell data-race tracking for atomic
    /// load acquire semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn acquire(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.clock.join(&atomic.sync_vector);
        }
    }

    /// Update memory cell data-race tracking for atomic
    /// load relaxed semantics; this is a no-op if this memory was
    /// not used previously as atomic memory.
    fn load_relaxed(&mut self, clocks: &mut ThreadClockSet) {
        if let Some(atomic) = self.atomic() {
            clocks.fence_acquire.join(&atomic.sync_vector);
        }
    }

    /// Update the memory cell data-race tracking for atomic
    /// store release semantics.
    fn release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.clock);
        atomic.release_sequences.clear_and_set(thread, &clocks.clock);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics.
    fn store_relaxed(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.set_values(&clocks.fence_release);
        if let Some(release) = atomic.release_sequences.load(thread) {
            atomic.sync_vector.join(release);
        }
        atomic.release_sequences.clear_and_retain(thread);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store release semantics for RMW operations.
    fn rmw_release(&mut self, clocks: &ThreadClockSet, thread: ThreadId) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.clock);
        atomic.release_sequences.insert(thread, &clocks.clock);
    }

    /// Update the memory cell data-race tracking for atomic
    /// store relaxed semantics for RMW operations.
    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet) {
        let atomic = self.atomic_mut();
        atomic.sync_vector.join(&clocks.fence_release);
    }
    /// Detect races for non-atomic read operations at the current memory cell;
    /// returns true if a data-race is detected.
    fn read_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        if self.write <= clocks.clock[self.write_thread] {
            self.read.set_at_thread(&clocks.clock, thread);
            false
        } else {
            true
        }
    }

    /// Detect races for non-atomic write operations at the current memory cell;
    /// returns true if a data-race is detected.
    /// (An illustrative sketch of this check appears in the test module at the
    /// end of this file.)
    fn write_race_detect(&mut self, clocks: &ThreadClockSet, thread: ThreadId) -> bool {
        if self.write <= clocks.clock[self.write_thread] && self.read <= clocks.clock {
            self.write = clocks.clock[thread];
            self.write_thread = thread;
            self.read.set_zero_vector();
            false
        } else {
            true
        }
    }
}
/// Vector clock metadata for a logical memory allocation.
#[derive(Debug, Clone)]
pub struct VClockAlloc {

    /// Range of vector clocks, mapping to the vector-clock
    /// state of the last write to the bytes in this allocation.
    alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,

    // Pointer to global state.
    global: MemoryExtra,
}

impl VClockAlloc {

    /// Create a new data-race allocation detector.
    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
        VClockAlloc {
            global: Rc::clone(global),
            alloc_ranges: RefCell::new(
                RangeMap::new(len, MemoryCellClocks::default())
            ),
        }
    }
    /// Report a data-race found in the program:
    /// this finds the two racing threads and the type
    /// of data-race that occurred; this will also
    /// return info about the memory location the data-race
    /// occurred on.
    fn report_data_race<'tcx>(
        global: &MemoryExtra, range: &MemoryCellClocks, action: &str,
        pointer: Pointer<Tag>, len: Size
    ) -> InterpResult<'tcx> {
        let current_thread = global.current_thread();
        let current_state = global.current_thread_state();
        let mut write_clock = VClock::default();
        let (
            other_action, other_thread, other_clock
        ) = if range.write > current_state.clock[range.write_thread] {

            // Create the effective write-clock that the data-race occurred with.
            let wclock = write_clock.get_mut_with_min_len(
                current_state.clock.as_slice().len()
                    .max(range.write_thread.to_u32() as usize + 1)
            );
            wclock[range.write_thread.to_u32() as usize] = range.write;
            ("WRITE", range.write_thread, write_clock.as_slice())
        } else {

            // Find the index in the read-clock that the data-race occurred with.
            let read_slice = range.read.as_slice();
            let clock_slice = current_state.clock.as_slice();
            let conflicting_index = read_slice.iter()
                .zip(clock_slice.iter())
                .enumerate().find_map(|(idx, (&read, &clock))| {
                    if read > clock {
                        Some(idx)
                    } else {
                        None
                    }
                }).unwrap_or_else(|| {
                    assert!(read_slice.len() > clock_slice.len(), "BUG: cannot find read race yet reported data-race");
                    let rest_read = &read_slice[clock_slice.len()..];
                    rest_read.iter().enumerate().find_map(|(idx, &val)| {
                        if val > 0 {
                            Some(idx + clock_slice.len())
                        } else {
                            None
                        }
                    }).expect("Invariant broken for read-slice, no 0 element at the tail")
                });
            ("READ", ThreadId::new(conflicting_index), range.read.as_slice())
        };

        let current_thread_info = global.print_thread_metadata(current_thread);
        let other_thread_info = global.print_thread_metadata(other_thread);

        // Throw the data-race detection error.
        throw_ub_format!(
            "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
            \n\t\t -current vector clock = {:?}\
            \n\t\t -conflicting timestamp = {:?}",
            action, current_thread_info,
            other_action, other_thread_info,
            pointer.alloc_id, pointer.offset.bytes(), len.bytes(),
            current_state.clock, other_clock
        )
    }
    /// Detect data-races for an unsynchronized read operation; will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn read<'tcx>(&self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();

            // The alloc-ranges are not split, however changes are not going to be made
            // to the ranges being tested, so this is ok.
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
            for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
                if range.read_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "READ", pointer, len
                    );
                }
            }
            Ok(())
        } else {
            Ok(())
        }
    }
    /// Detect data-races for an unsynchronized write operation; will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "WRITE", pointer, len
                    );
                }
            }
            Ok(())
        } else {
            Ok(())
        }
    }
    /// Detect data-races for an unsynchronized deallocate operation; will not perform
    /// data-race detection if `multi-threaded` is false, either due to no threads
    /// being created or if it is temporarily disabled during a racy read or write.
    pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
        if self.global.multi_threaded.get() {
            let current_thread = self.global.current_thread();
            let current_state = self.global.current_thread_state();
            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
                if range.write_race_detect(&*current_state, current_thread) {
                    // Report the data-race.
                    return Self::report_data_race(
                        &self.global, range, "DEALLOCATE", pointer, len
                    );
                }
            }
            Ok(())
        } else {
            Ok(())
        }
    }
}
/// The current set of vector clocks describing the state
/// of a thread: contains the happens-before clock and
/// additional metadata to model atomic fence operations.
#[derive(Clone, Default, Debug)]
struct ThreadClockSet {

    /// The increasing clock representing timestamps
    /// that happen-before this thread.
    clock: VClock,

    /// The set of timestamps that will happen-before this
    /// thread once it performs an acquire fence.
    fence_acquire: VClock,

    /// The last timestamp of happens-before relations that
    /// have been released by this thread by a fence.
    fence_release: VClock,
}
impl ThreadClockSet {

    /// Apply the effects of a release fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_release_fence(&mut self) {
        self.fence_release.set_values(&self.clock);
    }

    /// Apply the effects of an acquire fence to this
    /// set of thread vector clocks.
    #[inline]
    fn apply_acquire_fence(&mut self) {
        self.clock.join(&self.fence_acquire);
    }

    /// Increment the happens-before clock at a
    /// known index.
    #[inline]
    fn increment_clock(&mut self, thread: ThreadId) {
        self.clock.increment_thread(thread);
    }

    /// Join the happens-before clock with that of
    /// another thread, used to model thread join
    /// operations.
    fn join_with(&mut self, other: &ThreadClockSet) {
        self.clock.join(&other.clock);
    }
}
/// Global data-race detection state: contains the currently
/// executing thread as well as the vector clocks associated
/// with each of the threads.
#[derive(Debug, Clone)]
pub struct GlobalState {

    /// Set to true once the first additional
    /// thread has launched, due to the dependency
    /// between before and after a thread launch.
    /// Any data-races must be recorded after this
    /// so concurrent execution can ignore recording
    /// any data-races before then.
    multi_threaded: Cell<bool>,

    /// The current vector clock for all threads;
    /// this includes threads that have terminated.
    thread_clocks: RefCell<IndexVec<ThreadId, ThreadClockSet>>,

    /// Thread name cache for better diagnostics on the reporting
    /// of a data-race.
    thread_names: RefCell<IndexVec<ThreadId, Option<Box<str>>>>,

    /// The current thread being executed;
    /// this is mirrored from the scheduler since
    /// it is required for loading the current vector
    /// clock for data-race detection.
    current_thread_id: Cell<ThreadId>,
}
impl GlobalState {

    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new() -> Self {
        let mut vec = IndexVec::new();
        let thread_id = vec.push(ThreadClockSet::default());
        vec[thread_id].increment_clock(thread_id);
        GlobalState {
            multi_threaded: Cell::new(false),
            thread_clocks: RefCell::new(vec),
            thread_names: RefCell::new(IndexVec::new()),
            current_thread_id: Cell::new(thread_id),
        }
    }
    /// Hook for thread creation: enables multi-threaded execution and marks
    /// the current thread timestamp as happening-before the created thread.
    #[inline]
    pub fn thread_created(&self, thread: ThreadId) {

        // Enable multi-threaded execution mode now that there are at least
        // two threads.
        self.multi_threaded.set(true);
        let current_thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors.ensure_contains_elem(thread, Default::default);
        let (current, created) = vectors.pick2_mut(current_thread, thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);

        // The current thread happens-before the created thread,
        // so update the created vector clock.
        created.join_with(current);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        created.increment_clock(thread);
    }
    /// Hook on a thread join to update the implicit happens-before relation
    /// between the joined thread and the current thread.
    #[inline]
    pub fn thread_joined(&self, current_thread: ThreadId, join_thread: ThreadId) {
        let mut vectors = self.thread_clocks.borrow_mut();
        let (current, join) = vectors.pick2_mut(current_thread, join_thread);

        // Pre-increment clocks before the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);

        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        current.join_with(join);

        // Post-increment clocks after the atomic operation.
        current.increment_clock(current_thread);
        join.increment_clock(join_thread);
    }
    /// Hook for updating the local tracker of the currently
    /// enabled thread; should always be updated whenever
    /// `active_thread` in thread.rs is updated.
    #[inline]
    pub fn thread_set_active(&self, thread: ThreadId) {
        self.current_thread_id.set(thread);
    }

    /// Hook for updating the local tracker of the thread's name;
    /// this should always mirror the local value in thread.rs.
    /// The thread name is used for improved diagnostics
    /// during a data-race.
    #[inline]
    pub fn thread_set_name(&self, name: String) {
        let name = name.into_boxed_str();
        let mut names = self.thread_names.borrow_mut();
        let thread = self.current_thread_id.get();
        names.ensure_contains_elem(thread, Default::default);
        names[thread] = Some(name);
    }
    /// Advance the vector clock for a thread;
    /// this is called before and after any atomic/synchronizing operations
    /// that may manipulate state.
    #[inline]
    fn advance_vector_clock(&self) {
        let thread = self.current_thread_id.get();
        let mut vectors = self.thread_clocks.borrow_mut();
        vectors[thread].increment_clock(thread);

        // Log the increment in the atomic vector clock.
        log::trace!("Atomic vector clock increase for {:?} to {:?}", thread, vectors[thread].clock);
    }
    /// Internal utility to identify a thread stored internally;
    /// returns the id and the name for better diagnostics.
    fn print_thread_metadata(&self, thread: ThreadId) -> String {
        if let Some(Some(name)) = self.thread_names.borrow().get(thread) {
            let name: &str = name;
            format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), &*name)
        } else {
            format!("Thread(id = {:?})", thread.to_u32())
        }
    }
    /// Acquire a lock, express that the previous call of
    /// `validate_lock_release` must happen-before this.
    /// (A sketch of this flow appears in the test module at the end of this file.)
    pub fn validate_lock_acquire(&self, lock: &DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &mut ref_vector[thread];
        clocks.clock.join(&lock.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    pub fn validate_lock_release(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.set_values(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    pub fn validate_lock_release_shared(&self, lock: &mut DataRaceLockHandle, thread: ThreadId) {
        let mut ref_vector = self.thread_clocks.borrow_mut();
        ref_vector[thread].increment_clock(thread);

        let clocks = &ref_vector[thread];
        lock.clock.join(&clocks.clock);

        ref_vector[thread].increment_clock(thread);
    }
    /// Load the thread clock set associated with the current thread.
    #[inline]
    fn current_thread_state(&self) -> Ref<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow();
        let thread = self.current_thread_id.get();
        Ref::map(ref_vector, |vector| &vector[thread])
    }

    /// Load the thread clock set associated with the current thread
    /// mutably for modification.
    #[inline]
    fn current_thread_state_mut(&self) -> RefMut<'_, ThreadClockSet> {
        let ref_vector = self.thread_clocks.borrow_mut();
        let thread = self.current_thread_id.get();
        RefMut::map(ref_vector, |vector| &mut vector[thread])
    }

    /// Return the current thread; should be the same
    /// as the data-race active thread.
    #[inline]
    fn current_thread(&self) -> ThreadId {
        self.current_thread_id.get()
    }
}
/// The size of the vector clock to store inline;
/// clock vectors larger than this will be stored on the heap.
const SMALL_VECTOR: usize = 4;

/// The type of the time-stamps recorded in the data-race detector,
/// set to a type of unsigned integer.
type Timestamp = u32;

/// A vector clock for detecting data-races.
/// Invariant: the last element in a VClock must not be 0
///  -- this means that derive(PartialEq & Eq) is correct,
///  -- as there is no implicit zero tail that might be equal,
///  -- and it also simplifies the implementation of PartialOrd.
#[derive(Clone, PartialEq, Eq, Default, Debug)]
pub struct VClock(SmallVec<[Timestamp; SMALL_VECTOR]>);
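// Note (added for clarity): under this invariant the logical clocks
// [1, 2] and [1, 2, 0] share the single representation [1, 2], so the
// derived equality and the slice-based comparisons below remain correct.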
impl VClock {

    /// Load the backing slice behind the clock vector.
    #[inline]
    fn as_slice(&self) -> &[Timestamp] {
        self.0.as_slice()
    }

    /// Get a mutable slice to the internal vector with minimum `min_len`
    /// elements; to preserve the invariant, the caller must set
    /// the element at `min_len - 1` to a non-zero value.
    #[inline]
    fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [Timestamp] {
        if self.0.len() < min_len {
            self.0.resize(min_len, 0);
        }
        assert!(self.0.len() >= min_len);
        self.0.as_mut_slice()
    }

    /// Increment the vector clock at a known index.
    #[inline]
    fn increment_index(&mut self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let idx_ref = &mut mut_slice[idx];
        *idx_ref = idx_ref.checked_add(1).expect("Vector clock overflow")
    }
    /// Increment the vector element representing the progress
    /// of execution in the given thread.
    #[inline]
    pub fn increment_thread(&mut self, thread: ThreadId) {
        self.increment_index(thread.to_u32() as usize);
    }

    /// Join the two vector clocks together; this
    /// sets each vector element to the maximum value
    /// of that element in either of the two source clocks.
    pub fn join(&mut self, other: &Self) {
        let rhs_slice = other.as_slice();
        let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());

        // Element-wise set to maximum.
        for (l, &r) in lhs_slice.iter_mut().zip(rhs_slice.iter()) {
            *l = r.max(*l);
        }
    }
    /// Set the element at a known index to the value
    /// stored at the same index in the other vector clock.
    fn set_at_index(&mut self, other: &Self, idx: usize) {
        let mut_slice = self.get_mut_with_min_len(idx + 1);
        let slice = other.as_slice();
        mut_slice[idx] = slice[idx];
    }

    /// Set the element for a thread's index to the value
    /// stored at the same index in the other vector clock.
    #[inline]
    pub fn set_at_thread(&mut self, other: &Self, thread: ThreadId) {
        self.set_at_index(other, thread.to_u32() as usize);
    }

    /// Clear the vector to all zeros, stored as an empty internal
    /// vector.
    #[inline]
    pub fn set_zero_vector(&mut self) {
        self.0.clear();
    }

    /// Set the values stored in this vector clock
    /// to the values stored in another.
    pub fn set_values(&mut self, new_value: &VClock) {
        let new_slice = new_value.as_slice();
        self.0.resize(new_slice.len(), 0);
        self.0.copy_from_slice(new_slice);
    }
}
impl PartialOrd for VClock {
    fn partial_cmp(&self, other: &VClock) -> Option<Ordering> {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // Iterate through the combined vector slice,
        // keeping track of the order that is currently possible to satisfy.
        // If an ordering relation is detected to be impossible, then bail and
        // directly return None.
        let mut iter = lhs_slice.iter().zip(rhs_slice.iter());
        let mut order = match iter.next() {
            Some((lhs, rhs)) => lhs.cmp(rhs),
            None => Ordering::Equal,
        };
        for (l, r) in iter {
            match order {
                Ordering::Equal => order = l.cmp(r),
                Ordering::Less => if l > r {
                    return None
                },
                Ordering::Greater => if l < r {
                    return None
                },
            }
        }

        // Now test if either left or right have trailing elements;
        // by the invariant the trailing elements have at least one
        // non-zero value, so no additional calculation is required
        // to determine the result of the PartialOrder.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        match l_len.cmp(&r_len) {
            // Equal means no additional elements: return the current order.
            Ordering::Equal => Some(order),
            // Right has at least 1 element greater than the implicit 0,
            // so the only valid values are Ordering::Less or None.
            Ordering::Less => match order {
                Ordering::Less | Ordering::Equal => Some(Ordering::Less),
                Ordering::Greater => None,
            },
            // Left has at least 1 element greater than the implicit 0,
            // so the only valid values are Ordering::Greater or None.
            Ordering::Greater => match order {
                Ordering::Greater | Ordering::Equal => Some(Ordering::Greater),
                Ordering::Less => None,
            },
        }
    }
    fn lt(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element
        // in l_len is greater than in r_len, therefore the result
        // is either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {

            // If any elements on the left are greater than on the right,
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r; finally
            // the case where the values are potentially equal needs to be considered
            // and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l > r {
                    return false
                } else if l < r {
                    equal = false;
                }
            }
            !equal
        } else {
            false
        }
    }
    fn le(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If l_len > r_len then at least one element
        // in l_len is greater than in r_len, therefore the result
        // is either Some(Greater) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len <= r_len {

            // If any elements on the left are greater than on the right,
            // then the result is None or Some(Greater), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l <= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l > r)
        } else {
            false
        }
    }
    fn gt(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element
        // in r_len is greater than in l_len, therefore the result
        // is either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {

            // If any elements on the left are less than on the right,
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r; finally
            // the case where the values are potentially equal needs to be considered
            // and false returned as well.
            let mut equal = l_len == r_len;
            for (&l, &r) in lhs_slice.iter().zip(rhs_slice.iter()) {
                if l < r {
                    return false
                } else if l > r {
                    equal = false;
                }
            }
            !equal
        } else {
            false
        }
    }
    fn ge(&self, other: &VClock) -> bool {

        // Load the values as slices.
        let lhs_slice = self.as_slice();
        let rhs_slice = other.as_slice();

        // If r_len > l_len then at least one element
        // in r_len is greater than in l_len, therefore the result
        // is either Some(Less) or None, so return false.
        let l_len = lhs_slice.len();
        let r_len = rhs_slice.len();
        if l_len >= r_len {

            // If any elements on the left are less than on the right,
            // then the result is None or Some(Less), both of which
            // return false; the earlier test asserts that no elements in the
            // extended tail violate this assumption. Otherwise l >= r.
            !lhs_slice.iter().zip(rhs_slice.iter()).any(|(&l, &r)| l < r)
        } else {
            false
        }
    }
}
impl Index<ThreadId> for VClock {
    type Output = Timestamp;

    #[inline]
    fn index(&self, index: ThreadId) -> &Timestamp {
        self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
    }
}
/// Test vector clock ordering operations;
/// data-race detection is tested in the external
/// test suite.
#[cfg(test)]
mod tests {

    use super::{VClock, Timestamp};
    use std::cmp::Ordering;

    #[test]
    fn test_equal() {
        let mut c1 = VClock::default();
        let mut c2 = VClock::default();
        assert_eq!(c1, c2);
        c1.increment_index(5);
        assert_ne!(c1, c2);
        c2.increment_index(53);
        assert_ne!(c1, c2);
        c1.increment_index(53);
        assert_ne!(c1, c2);
        c2.increment_index(5);
        assert_eq!(c1, c2);
    }
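    // Added illustrative test, not part of the original detector code: a
    // minimal sketch of the intended `DataRaceLockHandle` flow, releasing on
    // one thread and acquiring on another via `GlobalState` directly. The
    // thread ids used here are arbitrary.
    #[test]
    fn test_lock_release_acquire_sketch() {
        use super::{DataRaceLockHandle, GlobalState, Idx, ThreadId};

        let global = GlobalState::new();
        let t0 = ThreadId::new(0);
        let t1 = ThreadId::new(1);
        global.thread_created(t1);

        let mut lock = DataRaceLockHandle::default();
        global.validate_lock_release(&mut lock, t0);
        global.validate_lock_acquire(&lock, t1);

        // After the acquire, everything released through the lock
        // happens-before t1, so the lock clock is bounded by t1's clock.
        assert!(lock.clock <= global.thread_clocks.borrow()[t1].clock);
    }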
    #[test]
    fn test_partial_order() {
        // Small test.
        assert_order(&[1], &[1], Some(Ordering::Equal));
        assert_order(&[1], &[2], Some(Ordering::Less));
        assert_order(&[2], &[1], Some(Ordering::Greater));
        assert_order(&[1], &[1, 2], Some(Ordering::Less));
        assert_order(&[2], &[1, 2], None);

        // Zero-shadowed test.
        assert_order(&[400], &[0, 1], None);

        // Larger test.
        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Equal));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,10], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Greater));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,11], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], None);
        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,0,0], Some(Ordering::Less));
        assert_order(&[0,1,2,3,4,5,6,7,8,9,9 ], &[0,1,2,3,4,5,6,7,8,9,10,0,1,0], Some(Ordering::Less));
    }
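    // Added illustrative test, not part of the original detector code:
    // checks that `join` computes the element-wise maximum and grows the
    // clock to the longer of the two lengths.
    #[test]
    fn test_join() {
        let mut left = from_slice(&[1, 4, 2]);
        let right = from_slice(&[3, 1, 2, 5]);
        left.join(&right);
        assert_eq!(left.as_slice(), &[3, 4, 2, 5][..]);
    }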
    fn from_slice(mut slice: &[Timestamp]) -> VClock {
        while let Some(0) = slice.last() {
            slice = &slice[..slice.len() - 1]
        }
        VClock(smallvec::SmallVec::from_slice(slice))
    }

    fn assert_order(l: &[Timestamp], r: &[Timestamp], o: Option<Ordering>) {
        let l = from_slice(l);
        let r = from_slice(r);

        // Test partial_cmp in both directions.
        let compare = l.partial_cmp(&r);
        assert_eq!(compare, o, "Invalid comparison\n l: {:?}\n r: {:?}", l, r);
        let alt_compare = r.partial_cmp(&l);
        assert_eq!(alt_compare, o.map(Ordering::reverse), "Invalid alt comparison\n l: {:?}\n r: {:?}", l, r);
        // Test the operators with faster implementations.
        assert_eq!(
            matches!(compare, Some(Ordering::Less)), l < r,
            "Invalid (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)), l <= r,
            "Invalid (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater)), l > r,
            "Invalid (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)), l >= r,
            "Invalid (>=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less)), r < l,
            "Invalid alt (<):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)), r <= l,
            "Invalid alt (<=):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater)), r > l,
            "Invalid alt (>):\n l: {:?}\n r: {:?}", l, r
        );
        assert_eq!(
            matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)), r >= l,
            "Invalid alt (>=):\n l: {:?}\n r: {:?}", l, r
        );
    }