use std::collections::{hash_map::Entry, HashMap, VecDeque};
use std::num::NonZeroU32;
use std::ops::Not;

use log::trace;

use rustc_index::vec::{Idx, IndexVec};

use crate::*;

/// We cannot use the `newtype_index!` macro because we have to use 0 as a
/// sentinel value meaning that the identifier is not assigned. This is because
/// the pthreads static initializers initialize memory with zeros (see the
/// `src/shims/sync.rs` file).
macro_rules! declare_id {
    ($name: ident) => {
        /// 0 is used to indicate that the id was not yet assigned and,
        /// therefore, is not a valid identifier.
        #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
        pub struct $name(NonZeroU32);

        impl $name {
            // Panics if `id == 0`.
            pub fn from_u32(id: u32) -> Self {
                Self(NonZeroU32::new(id).unwrap())
            }
        }

        impl Idx for $name {
            fn new(idx: usize) -> Self {
                // We use 0 as a sentinel value (see the comment above) and,
                // therefore, need to shift by one when converting from an index
                // into a vector.
                let shifted_idx = u32::try_from(idx).unwrap().checked_add(1).unwrap();
                $name(NonZeroU32::new(shifted_idx).unwrap())
            }
            fn index(self) -> usize {
                // See the comment in `Self::new`.
                // (This cannot underflow because self is NonZeroU32.)
                usize::try_from(self.0.get() - 1).unwrap()
            }
        }

        impl $name {
            pub fn to_u32_scalar<'tcx>(&self) -> Scalar<Provenance> {
                Scalar::from_u32(self.0.get())
            }
        }
    };
}

declare_id!(MutexId);
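
// A minimal sketch of the index shifting above (hypothetical usage, not part
// of the real callers): an id created from index 0 is stored internally as
// `NonZeroU32(1)` and converts back to index 0 when indexing an `IndexVec`.
//
//     let id = MutexId::new(0);              // internal value: NonZeroU32(1)
//     assert_eq!(id.index(), 0);             // shifted back down for indexing
//     assert_eq!(MutexId::from_u32(1), id);  // from_u32 takes the raw value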

/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
    /// The thread that currently owns the lock.
    owner: Option<ThreadId>,
    /// How many times the mutex was locked by the owner.
    lock_count: usize,
    /// The queue of threads waiting for this mutex.
    queue: VecDeque<ThreadId>,
    /// Data race handle. This tracks the happens-before
    /// relationship between each mutex access. It is
    /// released into during unlock and acquired from during
    /// locking, and therefore stores the clock of the last
    /// thread to release this mutex.
    data_race: VClock,
}

declare_id!(RwLockId);

/// The read-write lock state.
#[derive(Default, Debug)]
struct RwLock {
    /// The writer thread that currently owns the lock.
    writer: Option<ThreadId>,
    /// The readers that currently own the lock and how many times they acquired
    /// the lock.
    readers: HashMap<ThreadId, usize>,
    /// The queue of writer threads waiting for this lock.
    writer_queue: VecDeque<ThreadId>,
    /// The queue of reader threads waiting for this lock.
    reader_queue: VecDeque<ThreadId>,
    /// Data race handle for writers. This tracks the happens-before
    /// ordering between each write access to a rwlock and is updated
    /// after a sequence of concurrent readers to track the happens-before
    /// ordering between the set of previous readers and the current writer.
    /// Contains the clock of the last thread to release a writer
    /// lock or the joined clock of the set of last threads to release
    /// shared reader locks.
    data_race: VClock,
    /// Data race handle for readers. This is temporary storage
    /// for the combined happens-before ordering between all
    /// concurrent readers and the next writer; the value
    /// is stored into the main `data_race` variable once all
    /// readers are finished.
    /// It has to be stored separately since reader lock acquires
    /// must load the clock of the last write and must not
    /// add happens-before orderings between shared reader
    /// locks.
    data_race_reader: VClock,
}
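
// A hedged sketch of the two-clock protocol above (hypothetical thread ids
// `w`, `r1`, `r2`; the methods are defined further down):
//
//     rwlock_writer_lock(id, w);    // w acquires from data_race
//     rwlock_writer_unlock(id, w);  // w releases into data_race and data_race_reader
//     rwlock_reader_lock(id, r1);   // r1 acquires from data_race: sees w, not other readers
//     rwlock_reader_lock(id, r2);   // r2 acquires from data_race: sees w, not r1
//     rwlock_reader_unlock(id, r1); // r1 releases into data_race_reader
//     rwlock_reader_unlock(id, r2); // last reader: data_race := data_race_reader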

declare_id!(CondvarId);

/// A thread waiting on a condition variable.
#[derive(Debug)]
struct CondvarWaiter {
    /// The thread that is waiting on this variable.
    thread: ThreadId,
    /// The mutex on which the thread is waiting.
    mutex: MutexId,
}

/// The condition variable state.
#[derive(Default, Debug)]
struct Condvar {
    waiters: VecDeque<CondvarWaiter>,
    /// Tracks the happens-before relationship
    /// between a cond-var signal and a cond-var
    /// wait during a non-spurious signal event.
    /// Contains the clock of the last thread to
    /// perform a condvar signal.
    data_race: VClock,
}

/// The futex state.
#[derive(Default, Debug)]
struct Futex {
    waiters: VecDeque<FutexWaiter>,
    /// Tracks the happens-before relationship
    /// between a futex-wake and a futex-wait
    /// during a non-spurious wake event.
    /// Contains the clock of the last thread to
    /// perform a futex-wake.
    data_race: VClock,
}

/// A thread waiting on a futex.
#[derive(Debug)]
struct FutexWaiter {
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    thread: ThreadId,
    /// The bitset used by FUTEX_*_BITSET, or u32::MAX for other operations.
    bitset: u32,
}

/// The state of all synchronization variables.
#[derive(Default, Debug)]
pub(super) struct SynchronizationState {
    mutexes: IndexVec<MutexId, Mutex>,
    rwlocks: IndexVec<RwLockId, RwLock>,
    condvars: IndexVec<CondvarId, Condvar>,
    futexes: HashMap<u64, Futex>,
}
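
// Sketch: the `declare_id!` ids above index directly into these `IndexVec`s,
// while futexes are keyed by the address of their futex word. Hypothetical
// usage: `&sync.mutexes[MutexId::from_u32(1)]` is the first mutex created.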

// Private extension trait for local helper methods
impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Take a reader out of the queue waiting for the lock.
    /// Returns `true` if some thread got the rwlock.
    #[inline]
    fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
        let this = self.eval_context_mut();
        if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
            this.unblock_thread(reader);
            this.rwlock_reader_lock(id, reader);
            true
        } else {
            false
        }
    }

    /// Take the writer out of the queue waiting for the lock.
    /// Returns `true` if some thread got the rwlock.
    #[inline]
    fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
        let this = self.eval_context_mut();
        if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
            this.unblock_thread(writer);
            this.rwlock_writer_lock(id, writer);
            true
        } else {
            false
        }
    }

    /// Take a thread out of the queue waiting for the mutex, and lock
    /// the mutex for it. Returns `true` if some thread has the mutex now.
    #[inline]
    fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
        let this = self.eval_context_mut();
        if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
            this.unblock_thread(thread);
            this.mutex_lock(id, thread);
            true
        } else {
            false
        }
    }
}

// Public interface to synchronization primitives. Please note that in most
// cases, the function calls are infallible and it is the client's (shim
// implementation's) responsibility to detect and deal with erroneous
// situations.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    #[inline]
    /// Create state for a new mutex.
    fn mutex_create(&mut self) -> MutexId {
        let this = self.eval_context_mut();
        this.machine.threads.sync.mutexes.push(Default::default())
    }

    #[inline]
    /// Provides the closure with the next MutexId. Creates that mutex if the closure returns
    /// `None`, otherwise returns the id from the closure.
    fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
    where
        F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.mutexes.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.mutexes.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }
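
    // A hedged sketch of how a shim might call `mutex_get_or_create` (the
    // id-field read is hypothetical, not part of this file): the closure
    // inspects the guest's mutex object and returns `Ok(None)` for a zero
    // (uninitialized) id, in which case `next_id` is allocated and should be
    // written back into the object by the shim.
    //
    //     let id = this.mutex_get_or_create(|ecx, next_id| {
    //         let raw: u32 = /* read the guest mutex's id field */ 0;
    //         Ok(if raw == 0 { None } else { Some(MutexId::from_u32(raw)) })
    //     })?;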

    #[inline]
    /// Get the id of the thread that currently owns this lock.
    fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
        let this = self.eval_context_ref();
        this.machine.threads.sync.mutexes[id].owner.unwrap()
    }

    #[inline]
    /// Check if locked.
    fn mutex_is_locked(&self, id: MutexId) -> bool {
        let this = self.eval_context_ref();
        this.machine.threads.sync.mutexes[id].owner.is_some()
    }

    /// Lock by setting the mutex owner and increasing the lock count.
    fn mutex_lock(&mut self, id: MutexId, thread: ThreadId) {
        let this = self.eval_context_mut();
        let mutex = &mut this.machine.threads.sync.mutexes[id];
        if let Some(current_owner) = mutex.owner {
            assert_eq!(thread, current_owner, "mutex already locked by another thread");
            assert!(
                mutex.lock_count > 0,
                "invariant violation: lock_count == 0 iff the mutex is unlocked"
            );
        } else {
            mutex.owner = Some(thread);
        }
        mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&mutex.data_race, thread);
        }
    }

    /// Try unlocking by decreasing the lock count and returning the old lock
    /// count. If the lock count reaches 0, release the lock and potentially
    /// give it to a new owner. If the lock was not locked by `expected_owner`,
    /// return `None`.
    fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
        let this = self.eval_context_mut();
        let mutex = &mut this.machine.threads.sync.mutexes[id];
        if let Some(current_owner) = mutex.owner {
            // Mutex is locked.
            if current_owner != expected_owner {
                // Only the owner can unlock the mutex.
                return None;
            }
            let old_lock_count = mutex.lock_count;
            mutex.lock_count = old_lock_count
                .checked_sub(1)
                .expect("invariant violation: lock_count == 0 iff the mutex is unlocked");
            if mutex.lock_count == 0 {
                mutex.owner = None;
                // The mutex is completely unlocked. Try transferring ownership
                // to another thread.
                if let Some(data_race) = &this.machine.data_race {
                    data_race.validate_lock_release(&mut mutex.data_race, current_owner);
                }
                this.mutex_dequeue_and_lock(id);
            }
            Some(old_lock_count)
        } else {
            // Mutex is not locked.
            None
        }
    }
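
    // Sketch of the reentrant semantics above (hypothetical thread id `t`):
    //
    //     this.mutex_lock(id, t);                         // owner = t, lock_count = 1
    //     this.mutex_lock(id, t);                         // reentrant: lock_count = 2
    //     assert_eq!(this.mutex_unlock(id, t), Some(2));  // lock_count -> 1
    //     assert_eq!(this.mutex_unlock(id, t), Some(1));  // fully released, queue polled
    //     assert_eq!(this.mutex_unlock(id, t), None);     // no longer locked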

    #[inline]
    /// Put the thread into the queue waiting for the mutex.
    fn mutex_enqueue_and_block(&mut self, id: MutexId, thread: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.mutex_is_locked(id), "queueing on unlocked mutex");
        this.machine.threads.sync.mutexes[id].queue.push_back(thread);
        this.block_thread(thread);
    }

    #[inline]
    /// Create state for a new read-write lock.
    fn rwlock_create(&mut self) -> RwLockId {
        let this = self.eval_context_mut();
        this.machine.threads.sync.rwlocks.push(Default::default())
    }

    #[inline]
    /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns
    /// `None`, otherwise returns the id from the closure.
    fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
    where
        F: FnOnce(
            &mut MiriEvalContext<'mir, 'tcx>,
            RwLockId,
        ) -> InterpResult<'tcx, Option<RwLockId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.rwlocks.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }

    #[inline]
    /// Check if locked.
    fn rwlock_is_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.threads.sync.rwlocks[id];
        trace!(
            "rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
            id,
            rwlock.writer,
            rwlock.readers.len(),
        );
        rwlock.writer.is_some() || rwlock.readers.is_empty().not()
    }

    #[inline]
    /// Check if write locked.
    fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
        let this = self.eval_context_ref();
        let rwlock = &this.machine.threads.sync.rwlocks[id];
        trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
        rwlock.writer.is_some()
    }

    /// Read-lock the lock by adding the `reader` to the list of threads that own
    /// this lock.
    fn rwlock_reader_lock(&mut self, id: RwLockId, reader: ThreadId) {
        let this = self.eval_context_mut();
        assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
        trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        let count = rwlock.readers.entry(reader).or_insert(0);
        *count = count.checked_add(1).expect("the reader counter overflowed");
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&rwlock.data_race, reader);
        }
    }

    /// Try to read-unlock the lock for `reader` and potentially give the lock to a new owner.
    /// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
    fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
        let this = self.eval_context_mut();
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        match rwlock.readers.entry(reader) {
            Entry::Occupied(mut entry) => {
                let count = entry.get_mut();
                assert!(*count > 0, "rwlock locked with count == 0");
                *count -= 1;
                if *count == 0 {
                    trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, reader);
                    entry.remove();
                } else {
                    trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, reader);
                }
            }
            Entry::Vacant(_) => return false, // we did not even own this lock
        }
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
        }

        // The thread was a reader. If the lock is not held any more, give it to a writer.
        if this.rwlock_is_locked(id).not() {
            // All the readers are finished, so set the writer data-race handle to the value
            // of the union of all reader data race handles, since the set of readers
            // happens-before the next writer.
            let rwlock = &mut this.machine.threads.sync.rwlocks[id];
            rwlock.data_race.clone_from(&rwlock.data_race_reader);
            this.rwlock_dequeue_and_lock_writer(id);
        }
        true
    }
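
    // Sketch of the reader-counting semantics above (hypothetical thread id `r`):
    //
    //     this.rwlock_reader_lock(id, r);             // readers[r] = 1
    //     this.rwlock_reader_lock(id, r);             // reentrant: readers[r] = 2
    //     assert!(this.rwlock_reader_unlock(id, r));  // readers[r] = 1
    //     assert!(this.rwlock_reader_unlock(id, r));  // last reader gone, writer may be woken
    //     assert!(!this.rwlock_reader_unlock(id, r)); // r no longer holds the lock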

    #[inline]
    /// Put the reader in the queue waiting for the lock and block it.
    fn rwlock_enqueue_and_block_reader(&mut self, id: RwLockId, reader: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_write_locked(id), "read-queueing on a rwlock that is not write locked");
        this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
        this.block_thread(reader);
    }

    #[inline]
    /// Lock by setting the writer that owns the lock.
    fn rwlock_writer_lock(&mut self, id: RwLockId, writer: ThreadId) {
        let this = self.eval_context_mut();
        assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
        trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        rwlock.writer = Some(writer);
        if let Some(data_race) = &this.machine.data_race {
            data_race.validate_lock_acquire(&rwlock.data_race, writer);
        }
    }

    #[inline]
    /// Try to unlock by removing the writer.
    fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
        let this = self.eval_context_mut();
        let rwlock = &mut this.machine.threads.sync.rwlocks[id];
        if let Some(current_writer) = rwlock.writer {
            if current_writer != expected_writer {
                // Only the owner can unlock the rwlock.
                return false;
            }
            rwlock.writer = None;
            trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
            // Release memory to both reader and writer vector clocks
            // since this writer happens-before both the union of readers once they are finished
            // and the next writer.
            if let Some(data_race) = &this.machine.data_race {
                data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
                data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
            }
            // The thread was a writer.
            //
            // We are prioritizing writers here against the readers. As a
            // result, not only can readers starve writers, but writers can
            // also starve readers.
            if this.rwlock_dequeue_and_lock_writer(id) {
                // Someone got the write lock, nice.
            } else {
                // Give the lock to all readers.
                while this.rwlock_dequeue_and_lock_reader(id) {
                    // Rinse and repeat.
                }
            }
            true
        } else {
            false
        }
    }
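
    // Wakeup-order sketch for the unlock above (hypothetical queue contents):
    // with writer_queue = [w2] and reader_queue = [r1, r2], unlocking wakes
    // only w2; with writer_queue = [], the loop wakes both r1 and r2 at once.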

    #[inline]
    /// Put the writer in the queue waiting for the lock.
    fn rwlock_enqueue_and_block_writer(&mut self, id: RwLockId, writer: ThreadId) {
        let this = self.eval_context_mut();
        assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
        this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
        this.block_thread(writer);
    }

    #[inline]
    /// Create state for a new condition variable.
    fn condvar_create(&mut self) -> CondvarId {
        let this = self.eval_context_mut();
        this.machine.threads.sync.condvars.push(Default::default())
    }

    #[inline]
    /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns
    /// `None`, otherwise returns the id from the closure.
    fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
    where
        F: FnOnce(
            &mut MiriEvalContext<'mir, 'tcx>,
            CondvarId,
        ) -> InterpResult<'tcx, Option<CondvarId>>,
    {
        let this = self.eval_context_mut();
        let next_index = this.machine.threads.sync.condvars.next_index();
        if let Some(old) = existing(this, next_index)? {
            Ok(old)
        } else {
            let new_index = this.machine.threads.sync.condvars.push(Default::default());
            assert_eq!(next_index, new_index);
            Ok(new_index)
        }
    }

    #[inline]
    /// Is the condition variable awaited?
    fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
        let this = self.eval_context_mut();
        !this.machine.threads.sync.condvars[id].waiters.is_empty()
    }

    /// Mark that the thread is waiting on the condition variable.
    fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, mutex: MutexId) {
        let this = self.eval_context_mut();
        let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push_back(CondvarWaiter { thread, mutex });
    }

    /// Wake up some thread (if there is any) sleeping on the condition
    /// variable.
    fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
        let this = self.eval_context_mut();
        let current_thread = this.get_active_thread();
        let condvar = &mut this.machine.threads.sync.condvars[id];
        let data_race = &this.machine.data_race;

        // Each condvar signal happens-before the end of the condvar wait.
        if let Some(data_race) = data_race {
            data_race.validate_lock_release(&mut condvar.data_race, current_thread);
        }
        condvar.waiters.pop_front().map(|waiter| {
            if let Some(data_race) = data_race {
                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
            }
            (waiter.thread, waiter.mutex)
        })
    }
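
    // A hedged sketch of the wait/signal pairing (hypothetical ids; the
    // mutex unlock, block, and relock steps live in the shims, not here):
    //
    //     this.condvar_wait(cv, t, m);  // t registers as a waiter on cv
    //     // ... later, from another thread ...
    //     if let Some((thread, mutex)) = this.condvar_signal(cv) {
    //         // thread == t, mutex == m: the shim now re-acquires `mutex`
    //         // for `thread` (or enqueues it) and unblocks it.
    //     }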

    #[inline]
    /// Remove the thread from the queue of threads waiting on this condition variable.
    fn condvar_remove_waiter(&mut self, id: CondvarId, thread: ThreadId) {
        let this = self.eval_context_mut();
        this.machine.threads.sync.condvars[id].waiters.retain(|waiter| waiter.thread != thread);
    }

    fn futex_wait(&mut self, addr: u64, thread: ThreadId, bitset: u32) {
        let this = self.eval_context_mut();
        let futex = this.machine.threads.sync.futexes.entry(addr).or_default();
        let waiters = &mut futex.waiters;
        assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
        waiters.push_back(FutexWaiter { thread, bitset });
    }

    fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
        let this = self.eval_context_mut();
        let current_thread = this.get_active_thread();
        let futex = this.machine.threads.sync.futexes.get_mut(&addr)?;
        let data_race = &this.machine.data_race;

        // Each futex-wake happens-before the end of the futex wait.
        if let Some(data_race) = data_race {
            data_race.validate_lock_release(&mut futex.data_race, current_thread);
        }

        // Wake up the first thread in the queue that matches any of the bits in the bitset.
        futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
            let waiter = futex.waiters.remove(i).unwrap();
            if let Some(data_race) = data_race {
                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
            }
            waiter.thread
        })
    }
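
    // Bitset-matching sketch for the wake above (hypothetical values): a
    // waiter registered via FUTEX_WAIT_BITSET with bitset 0b0101 is woken by
    // futex_wake(addr, 0b0100) since the masks intersect, but is skipped by
    // futex_wake(addr, 0b1010). Plain FUTEX_WAIT waiters store u32::MAX and
    // therefore match any wake.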

    fn futex_remove_waiter(&mut self, addr: u64, thread: ThreadId) {
        let this = self.eval_context_mut();
        if let Some(futex) = this.machine.threads.sync.futexes.get_mut(&addr) {
            futex.waiters.retain(|waiter| waiter.thread != thread);
        }
    }
}