// src/tools/miri/src/concurrency/sync.rs
use std::collections::{hash_map::Entry, VecDeque};
use std::num::NonZeroU32;
use std::ops::Not;

use log::trace;

use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::{Idx, IndexVec};

use super::init_once::InitOnce;
use super::vector_clock::VClock;
use crate::*;

pub trait SyncId {
    fn from_u32(id: u32) -> Self;
    fn to_u32(&self) -> u32;
}

/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
/// `src/shims/sync.rs` file).
macro_rules! declare_id {
    ($name: ident) => {
        /// 0 is used to indicate that the id was not yet assigned and,
        /// therefore, is not a valid identifier.
        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
        pub struct $name(NonZeroU32);

        impl SyncId for $name {
            // Panics if `id == 0`.
            fn from_u32(id: u32) -> Self {
                Self(NonZeroU32::new(id).unwrap())
            }
            fn to_u32(&self) -> u32 {
                self.0.get()
            }
        }

        impl Idx for $name {
            fn new(idx: usize) -> Self {
                // We use 0 as a sentinel value (see the comment above) and,
                // therefore, need to shift by one when converting from an index
                // into a vector.
                let shifted_idx = u32::try_from(idx).unwrap().checked_add(1).unwrap();
                $name(NonZeroU32::new(shifted_idx).unwrap())
            }
            fn index(self) -> usize {
                // See the comment in `Self::new`.
                // (This cannot underflow because self is NonZeroU32.)
                usize::try_from(self.0.get() - 1).unwrap()
            }
        }

        impl $name {
            pub fn to_u32_scalar(&self) -> Scalar<Provenance> {
                Scalar::from_u32(self.0.get())
            }
        }
    };
}
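
// Example (comment-only sketch, not part of the original source): IDs
// generated by `declare_id!` are offset by one relative to their `IndexVec`
// slot, so the all-zeroes bit pattern left by pthreads static initializers is
// never a valid ID. `DemoId` is a hypothetical ID type for illustration:
//
//     declare_id!(DemoId);
//     let id = DemoId::new(0);    // first vector slot...
//     assert_eq!(id.to_u32(), 1); // ...is stored as the non-zero value 1
//     assert_eq!(id.index(), 0);  // and maps back to index 0
//     // DemoId::from_u32(0) would panic: 0 means "not yet assigned".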

declare_id!(MutexId);

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Data race handle. This tracks the happens-before
    /// relationship between each mutex access: it is
    /// released during unlock and acquired during
    /// locking, and therefore stores the clock of the last
    /// thread to release this mutex.
    data_race: VClock,
}

declare_id!(RwLockId);

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: FxHashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race handle for writers. Tracks the happens-before
    /// ordering between each write access to a rwlock and is updated
    /// after a sequence of concurrent readers to track the happens-
    /// before ordering between the set of previous readers and
    /// the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock or the joined clock of the set of last threads to release
    /// shared reader locks.
    data_race: VClock,
    /// Data race handle for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer, and the value
    /// is stored to the main data_race variable once all
    /// readers are finished.
    /// Has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    data_race_reader: VClock,
}
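
// Example (comment-only sketch, not part of the original source): why two
// clocks are needed. Suppose writer W releases the lock, then readers R1 and
// R2 each take and release a read lock. Both readers acquire from `data_race`
// (W's clock), so W happens-before each reader, but the readers stay unordered
// with respect to each other. Their releases are joined into the reader clock:
//
//     data_race        = clock(W)
//     data_race_reader = clock(R1) ⊔ clock(R2)  // join of all reader releases
//
// Once the last reader is gone, `data_race_reader` is copied into `data_race`,
// so the next writer acquires a clock that orders it after *all* readers.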

declare_id!(CondvarId);

#[derive(Debug, Copy, Clone)]
pub enum RwLockMode {
    Read,
    Write,
}

#[derive(Debug)]
pub enum CondvarLock {
    Mutex(MutexId),
    RwLock { id: RwLockId, mode: RwLockMode },
}

/// A thread waiting on a conditional variable.
#[derive(Debug)]
struct CondvarWaiter {
    /// The thread that is waiting on this variable.
    thread: ThreadId,
    /// The mutex or rwlock on which the thread is waiting.
    lock: CondvarLock,
}

/// The conditional variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<CondvarWaiter>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a cond-var signal.
    data_race: VClock,
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: VecDeque<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    data_race: VClock,
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The thread that is waiting on this futex.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}

/// The state of all synchronization variables.
#[derive(Default, Debug)]
pub(crate) struct SynchronizationState<'mir, 'tcx> {
    mutexes: IndexVec<MutexId, Mutex>,
    rwlocks: IndexVec<RwLockId, RwLock>,
    condvars: IndexVec<CondvarId, Condvar>,
    futexes: FxHashMap<u64, Futex>,
    pub(super) init_onces: IndexVec<InitOnceId, InitOnce<'mir, 'tcx>>,
}

impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
        for init_once in self.init_onces.iter() {
            init_once.visit_tags(visit);
        }
    }
}

// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
    crate::MiriInterpCxExt<'mir, 'tcx>
{
    /// Lazily initialize the ID of this Miri sync structure.
    /// ('0' indicates uninit.)
    #[inline]
    fn get_or_create_id<Id: SyncId>(
        &mut self,
        next_id: Id,
        lock_op: &OpTy<'tcx, Provenance>,
        offset: u64,
    ) -> InterpResult<'tcx, Option<Id>> {
        let this = self.eval_context_mut();
        let value_place =
            this.deref_operand_and_offset(lock_op, offset, this.machine.layouts.u32)?;

        // Since we are lazy, this update has to be atomic.
        let (old, success) = this
            .atomic_compare_exchange_scalar(
                &value_place,
                &ImmTy::from_uint(0u32, this.machine.layouts.u32),
                Scalar::from_u32(next_id.to_u32()),
                AtomicRwOrd::Relaxed, // deliberately *no* synchronization
                AtomicReadOrd::Relaxed,
                false,
            )?
            .to_scalar_pair();

        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
            // Caller of the closure needs to allocate next_id
            None
        } else {
            Some(Id::from_u32(old.to_u32().expect("layout is u32")))
        })
    }
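
    // Example (comment-only sketch, not part of the original source): how a
    // shim resolves a pthread-style object to a Miri ID by CAS-ing the ID
    // field from 0. `ID_OFFSET` is a hypothetical field offset within the
    // guest object:
    //
    //     let id = this.mutex_get_or_create_id(lock_op, ID_OFFSET)?;
    //     // First call: the field was 0, the CAS succeeds, `get_or_create_id`
    //     // returns `None`, and `mutex_get_or_create` pushes a fresh `Mutex`.
    //     // Later calls: the CAS fails and the old non-zero value is the ID.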

    /// Take a reader out of the queue waiting for the lock.
    /// Returns `true` if some thread got the rwlock.
    #[inline]
    fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
        let this = self.eval_context_mut();
        if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
            this.unblock_thread(reader);
            this.rwlock_reader_lock(id, reader);
            true
        } else {
            false
        }
    }

    /// Take the writer out of the queue waiting for the lock.
    /// Returns `true` if some thread got the rwlock.
    #[inline]
    fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
        let this = self.eval_context_mut();
        if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
            this.unblock_thread(writer);
            this.rwlock_writer_lock(id, writer);
            true
        } else {
            false
        }
    }

    /// Take a thread out of the queue waiting for the mutex, and lock
    /// the mutex for it. Returns `true` if some thread has the mutex now.
    #[inline]
    fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
        let this = self.eval_context_mut();
        if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
            this.unblock_thread(thread);
            this.mutex_lock(id, thread);
            true
        } else {
            false
        }
    }
}

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
    fn mutex_get_or_create_id(
        &mut self,
        lock_op: &OpTy<'tcx, Provenance>,
        offset: u64,
    ) -> InterpResult<'tcx, MutexId> {
        let this = self.eval_context_mut();
        this.mutex_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
    }

    fn rwlock_get_or_create_id(
        &mut self,
        lock_op: &OpTy<'tcx, Provenance>,
        offset: u64,
    ) -> InterpResult<'tcx, RwLockId> {
        let this = self.eval_context_mut();
        this.rwlock_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
    }

    fn condvar_get_or_create_id(
        &mut self,
        lock_op: &OpTy<'tcx, Provenance>,
        offset: u64,
    ) -> InterpResult<'tcx, CondvarId> {
        let this = self.eval_context_mut();
        this.condvar_get_or_create(|ecx, next_id| ecx.get_or_create_id(next_id, lock_op, offset))
    }

    #[inline]
    /// Provides the closure with the next MutexId. Creates that mutex if the closure returns `None`,
    /// otherwise returns the value from the closure.
    fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
    where
        F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.mutexes.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.mutexes.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }

    #[inline]
    /// Get the id of the thread that currently owns this lock.
    fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
        let this = self.eval_context_ref();
        this.machine.threads.sync.mutexes[id].owner.unwrap()
    }

    #[inline]
    /// Check if locked.
    fn mutex_is_locked(&self, id: MutexId) -> bool {
        let this = self.eval_context_ref();
        this.machine.threads.sync.mutexes[id].owner.is_some()
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
        let this = self.eval_context_mut();
        let mutex = &mut this.machine.threads.sync.mutexes[id];
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&mutex.data_race, thread);
        }
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by `expected_owner`,
    /// return `None`.
    fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
        let this = self.eval_context_mut();
        let current_span = this.machine.current_span();
        let mutex = &mut this.machine.threads.sync.mutexes[id];
        if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != expected_owner {
                // Only the owner can unlock the mutex.
                return None;
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count
                .checked_sub(1)
                .expect("invariant violation: lock_count == 0 iff the mutex is unlocked");
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.
                if let Some(data_race) = &this.machine.data_race {
                    data_race.validate_lock_release(
                        &mut mutex.data_race,
                        current_owner,
                        current_span,
                    );
                }
                this.mutex_dequeue_and_lock(id);
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        }
    }
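
    // Example (comment-only sketch, not part of the original source): the
    // recursive-locking protocol these two methods implement. Assuming thread
    // `t` and an existing `id`:
    //
    //     this.mutex_lock(id, t);                        // owner = t, lock_count = 1
    //     this.mutex_lock(id, t);                        // reentrant: lock_count = 2
    //     assert_eq!(this.mutex_unlock(id, t), Some(2)); // lock_count back to 1
    //     assert_eq!(this.mutex_unlock(id, t), Some(1)); // fully released, queue woken
    //     assert_eq!(this.mutex_unlock(id, t), None);    // not locked: error for the shim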

    /// Put the thread into the queue waiting for the mutex.
    #[inline]
    fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.mutex_is_locked(id), "queueing on unlocked mutex");
        this.machine.threads.sync.mutexes[id].queue.push_back(thread);
        this.block_thread(thread);
    }

    /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns `None`,
    /// otherwise returns the value from the closure.
    #[inline]
    fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
    where
        F: FnOnce(&mut MiriInterpCx<'mir, 'tcx>, RwLockId) -> InterpResult<'tcx, Option<RwLockId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.rwlocks.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }

    #[inline]
    /// Check if locked.
    fn rwlock_is_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.threads.sync.rwlocks[id];
        trace!(
            "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            id,
            rwlock.writer,
            rwlock.readers.len(),
        );
        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
    }

    /// Check if write locked.
    #[inline]
    fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.threads.sync.rwlocks[id];
        trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
        rwlock.writer.is_some()
    }

    /// Read-lock the lock by adding the `reader` to the list of threads that own
    /// this lock.
    fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
        let this = self.eval_context_mut();
        assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
        trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        let count = rwlock.readers.entry(reader).or_insert(0);
        *count = count.checked_add(1).expect("the reader counter overflowed");
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&rwlock.data_race, reader);
        }
    }

    /// Try to read-unlock the lock for `reader` and potentially give the lock to a new owner.
    /// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
    fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
        let this = self.eval_context_mut();
        let current_span = this.machine.current_span();
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        match rwlock.readers.entry(reader) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
                }
            }
            Entry::Vacant(_) => return false, // we did not even own this lock
        }
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_release_shared(
                &mut rwlock.data_race_reader,
                reader,
                current_span,
            );
        }

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if this.rwlock_is_locked(id).not() {
            // All the readers are finished, so set the writer data-race handle to the value
            //  of the union of all reader data race handles, since the set of readers
            //  happen-before the writers
            let rwlock = &mut this.machine.threads.sync.rwlocks[id];
            rwlock.data_race.clone_from(&rwlock.data_race_reader);
            this.rwlock_dequeue_and_lock_writer(id);
        }
        true
    }

    /// Put the reader in the queue waiting for the lock and block it.
    #[inline]
    fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
        this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
        this.block_thread(reader);
    }

    /// Lock by setting the writer that owns the lock.
    #[inline]
    fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
        let this = self.eval_context_mut();
        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
        trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        rwlock.writer = Some(writer);
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&rwlock.data_race, writer);
        }
    }

    /// Try to unlock by removing the writer.
    #[inline]
    fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
        let this = self.eval_context_mut();
        let current_span = this.machine.current_span();
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        if let Some(current_writer) = rwlock.writer {
            if current_writer != expected_writer {
                // Only the owner can unlock the rwlock.
                return false;
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
            // Release memory to both reader and writer vector clocks
            //  since this writer happens-before both the union of readers once they are finished
            //  and the next writer
            if let Some(data_race) = &this.machine.data_race {
                data_race.validate_lock_release(
                    &mut rwlock.data_race,
                    current_writer,
                    current_span,
                );
                data_race.validate_lock_release(
                    &mut rwlock.data_race_reader,
                    current_writer,
                    current_span,
                );
            }
            // The thread was a writer.
            //
            // We prioritize writers here over readers. As a result, not only
            // can readers starve writers, but writers can also starve readers.
            if this.rwlock_dequeue_and_lock_writer(id) {
                // Someone got the write lock, nice.
            } else {
                // Give the lock to all readers.
                while this.rwlock_dequeue_and_lock_reader(id) {
                    // Rinse and repeat.
                }
            }
            true
        } else {
            false
        }
    }
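
    // Example (comment-only sketch, not part of the original source): the
    // writer-priority hand-off above in action. Suppose writer W holds the
    // lock while writer W2 and readers R1, R2 are queued:
    //
    //     this.rwlock_writer_unlock(id, W);
    //     // -> W2 is dequeued and becomes the writer; R1 and R2 stay queued.
    //
    // Only if the writer queue had been empty would the `while` loop drain the
    // reader queue and grant R1 and R2 their read locks in one step.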

    /// Put the writer in the queue waiting for the lock.
    #[inline]
    fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
        this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
        this.block_thread(writer);
    }

    /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns `None`,
    /// otherwise returns the value from the closure.
    #[inline]
    fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
    where
        F: FnOnce(
            &mut MiriInterpCx<'mir, 'tcx>,
            CondvarId,
        ) -> InterpResult<'tcx, Option<CondvarId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.condvars.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.condvars.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }

    /// Is the conditional variable awaited?
    #[inline]
    fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
        let this = self.eval_context_mut();
        !this.machine.threads.sync.condvars[id].waiters.is_empty()
    }

    /// Mark that the thread is waiting on the conditional variable.
    fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock) {
        let this = self.eval_context_mut();
        let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push_back(CondvarWaiter { thread, lock });
    }

    /// Wake up some thread (if there is any) sleeping on the conditional
    /// variable.
    fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)> {
        let this = self.eval_context_mut();
        let current_thread = this.get_active_thread();
        let current_span = this.machine.current_span();
        let condvar = &mut this.machine.threads.sync.condvars[id];
        let data_race = &this.machine.data_race;

        // Each condvar signal happens-before the end of the condvar wake
        if let Some(data_race) = data_race {
            data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
        }
        condvar.waiters.pop_front().map(|waiter| {
            if let Some(data_race) = data_race {
                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
            }
            (waiter.thread, waiter.lock)
        })
    }
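
    // Example (comment-only sketch, not part of the original source, and an
    // assumption about how shims use this API): a pthread_cond_wait-style shim
    // might combine these building blocks; the shim, not this module, is
    // responsible for the mutex hand-off:
    //
    //     // waiting side (currently holding `mutex_id`):
    //     this.mutex_unlock(mutex_id, thread);
    //     this.condvar_wait(condvar_id, thread, CondvarLock::Mutex(mutex_id));
    //     this.block_thread(thread);
    //
    //     // signalling side:
    //     if let Some((woken, CondvarLock::Mutex(mutex_id))) = this.condvar_signal(condvar_id) {
    //         // re-acquire the mutex on behalf of the woken thread (or queue it).
    //     }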

    #[inline]
    /// Remove the thread from the queue of threads waiting on this conditional variable.
    fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
        let this = self.eval_context_mut();
        this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
    }

    fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32) {
        let this = self.eval_context_mut();
        let futex = &mut this.machine.threads.sync.futexes.entry(addr).or_default();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push_back(FutexWaiter { thread, bitset });
    }

    fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
        let this = self.eval_context_mut();
        let current_thread = this.get_active_thread();
        let current_span = this.machine.current_span();
        let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
        let data_race = &this.machine.data_race;

        // Each futex-wake happens-before the end of the futex wait
        if let Some(data_race) = data_race {
            data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
        }

        // Wake up the first thread in the queue that matches any of the bits in the bitset.
        futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
            let waiter = futex.waiters.remove(i).unwrap();
            if let Some(data_race) = data_race {
                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
            }
            waiter.thread
        })
    }
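
    // Example (comment-only sketch, not part of the original source): the
    // bitset matching above. FUTEX_WAIT_BITSET waiters store a mask; plain
    // FUTEX_WAIT stores `u32::MAX` so that any wake matches:
    //
    //     this.futex_wait(addr, t1, 0b01);     // FUTEX_WAIT_BITSET, mask 0b01
    //     this.futex_wait(addr, t2, u32::MAX); // plain FUTEX_WAIT
    //     this.futex_wake(addr, 0b10);         // wakes t2 (0b01 & 0b10 == 0 skips t1)
    //     this.futex_wake(addr, u32::MAX);     // plain FUTEX_WAKE: wakes t1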

    fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId) {
        let this = self.eval_context_mut();
        if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr) {
            futex.waiters.retain(|waiter| waiter.thread != thread);
        }
    }
}