1 use crate::cell::{Cell, UnsafeCell};
2 use crate::sync::atomic::{AtomicU8, Ordering};
3 use crate::sync::{Arc, Condvar, Mutex};
4 use crate::thread::{self, LocalKey};
5 use crate::thread_local;
// Signal: a clonable one-shot flag — a Mutex<bool> paired with a Condvar,
// shared between threads via Arc. Default gives an unset flag.
7 #[derive(Clone, Default)]
8 struct Signal(Arc<(Mutex<bool>, Condvar)>);
// NOTE(review): the method bodies below are partially elided. The fragments
// show a "notify" path (set the flag under the lock; presumably followed by a
// condvar notify on an elided line) and a "wait" path — confirm against the
// full file.
12 let (set, cvar) = &*self.0;
13 *set.lock().unwrap() = true;
18 let (set, cvar) = &*self.0;
19 let mut set = set.lock().unwrap();
// Condvar waits can wake spuriously, so this wait is presumably wrapped in a
// loop (on an elided line) that re-checks the flag before returning.
21 set = cvar.wait(set).unwrap();
// Wrapper that fires its Signal when dropped — lets a test observe that a TLS
// destructor actually ran.
26 struct NotifyOnDrop(Signal);
28 impl Drop for NotifyOnDrop {
// Borrow the inner signal; the elided remainder of drop() presumably notifies
// it — confirm against the full file.
30 let NotifyOnDrop(ref f) = *self;
// Smoke test: the same Cell<i32> key declared twice, once lazily initialized
// and once const-initialized, both driven through the shared `run` below.
37 thread_local!(static FOO: Cell<i32> = Cell::new(1));
39 thread_local!(static FOO2: Cell<i32> = const { Cell::new(1) });
42 fn run(key: &'static LocalKey<Cell<i32>>) {
// First access sees the initial value.
44 assert_eq!(f.get(), 1);
47 let t = thread::spawn(move || {
// A spawned thread gets its own independent copy, again starting at 1.
49 assert_eq!(f.get(), 1);
// After the (elided) write of 2, this thread's copy reads 2 — the spawned
// thread's copy was unaffected.
55 assert_eq!(f.get(), 2);
// "States" test: a TLS value that holds a reference to its own key.
62 struct Foo(&'static LocalKey<Foo>);
// Inside the (elided) Drop impl: while the value is being destroyed, accessing
// the key it lives in must fail.
65 assert!(self.0.try_with(|_| ()).is_err());
69 thread_local!(static FOO: Foo = Foo(&FOO));
71 thread_local!(static FOO2: Foo = const { Foo(&FOO2) });
74 fn run(foo: &'static LocalKey<Foo>) {
75 thread::spawn(move || {
// While the thread is alive, the key is accessible (this also forces
// initialization, registering the destructor exercised above).
76 assert!(foo.try_with(|_| ()).is_ok());
// Verifies TLS destructors run on thread exit: a spawned thread stashes a
// NotifyOnDrop in its TLS; only the destructor can fire the signal, which the
// (elided) remainder of `run` presumably waits on — confirm in the full file.
85 thread_local!(static FOO: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
87 thread_local!(static FOO2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
90 fn run(key: &'static LocalKey<UnsafeCell<Option<NotifyOnDrop>>>) {
91 let signal = Signal::default();
92 let signal2 = signal.clone();
93 let t = thread::spawn(move || unsafe {
// Option dance: lets the closure move the signal out exactly once.
94 let mut signal = Some(signal2);
96 *f.get() = Some(NotifyOnDrop(signal.take().unwrap()));
// Circular-destructor test: S1's destructor re-seeds the S2 key and vice
// versa, exercising destructors that register NEW TLS values during teardown.
// Two key pairs: lazily initialized (K1/K2) and const-initialized (K3/K4).
106 struct S1(&'static LocalKey<UnsafeCell<Option<S1>>>, &'static LocalKey<UnsafeCell<Option<S2>>>);
107 struct S2(&'static LocalKey<UnsafeCell<Option<S1>>>, &'static LocalKey<UnsafeCell<Option<S2>>>);
108 thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
109 thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
110 thread_local!(static K3: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
111 thread_local!(static K4: UnsafeCell<Option<S2>> = const { UnsafeCell::new(None) });
// Destructor-invocation counter. NOTE(review): `static mut` is only sound if
// all (elided) accesses stay on one thread at a time — confirm in full file.
112 static mut HITS: usize = 0;
// In S1's drop: if the S2 key is already gone, stop re-seeding (break the
// cycle); otherwise plant a fresh S2 for the runtime to destroy next.
118 if self.1.try_with(|_| ()).is_err() {
122 self.1.with(|s| *s.get() = Some(S2(self.0, self.1)));
// In S2's drop: the S1 key must still be usable here, and gets re-seeded.
134 assert!(self.0.try_with(|_| ()).is_ok());
136 self.0.with(|s| *s.get() = Some(S1(self.0, self.1)));
// One spawned thread per key pair drives the cycle (bodies elided).
141 thread::spawn(move || {
151 thread::spawn(move || {
// A TLS value that refers back to its own key: during its destructor the key
// must be inaccessible (unlike the circular test above, there is no second key
// to re-seed).
159 fn self_referential() {
160 struct S1(&'static LocalKey<UnsafeCell<Option<S1>>>);
162 thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
163 thread_local!(static K2: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
// In the (elided) Drop impl: the key currently being destroyed reports itself
// unusable.
167 assert!(self.0.try_with(|_| ()).is_err());
// Lazily-initialized variant.
171 thread::spawn(move || unsafe {
172 K1.with(|s| *s.get() = Some(S1(&K1)));
// Const-initialized variant.
177 thread::spawn(move || unsafe {
178 K2.with(|s| *s.get() = Some(S1(&K2)));
184 // Note that this test will deadlock if TLS destructors aren't run (this
185 // requires the destructor to be run to pass the test).
// S1's destructor plants a NotifyOnDrop into K2; only when THAT value's
// destructor later runs does the signal fire, so the (elided) wait at the end
// only returns if destructors created during destruction are themselves run.
187 fn dtors_in_dtors_in_dtors() {
189 thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
190 thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
// In S1's (elided) Drop impl:
194 let S1(ref signal) = *self;
// Best-effort: K2 may already be destroyed, in which case the signal is never
// fired and the test deadlocks — which is the failure mode being tested for.
196 let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
201 let signal = Signal::default();
202 let signal2 = signal.clone();
203 let _t = thread::spawn(move || unsafe {
// Option dance so the closure can move the signal out exactly once.
204 let mut signal = Some(signal2);
205 K1.with(|s| *s.get() = Some(S1(signal.take().unwrap())));
// Const-initialized twin of dtors_in_dtors_in_dtors: identical structure, but
// K1/K2 use `const { ... }` initializers, covering the other TLS code path.
211 fn dtors_in_dtors_in_dtors_const_init() {
213 thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
214 thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
// In S1's (elided) Drop impl:
218 let S1(ref signal) = *self;
// Best-effort store; if K2 is already gone the signal never fires and the
// test deadlocks, signalling the bug under test.
220 let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
225 let signal = Signal::default();
226 let signal2 = signal.clone();
227 let _t = thread::spawn(move || unsafe {
// Option dance so the closure can move the signal out exactly once.
228 let mut signal = Some(signal2);
229 K1.with(|s| *s.get() = Some(S1(signal.take().unwrap())));
234 // This test tests that TLS destructors have run before the thread joins. The
235 // test has no false positives (meaning: if the test fails, there's actually
236 // an ordering problem). It may have false negatives, where the test passes but
237 // join is not guaranteed to be after the TLS destructors. However, false
238 // negatives should be exceedingly rare due to judicious use of
239 // thread::yield_now and running the test several times.
241 fn join_orders_after_tls_destructors() {
242 // We emulate a synchronous MPSC rendezvous channel using only atomics and
243 // thread::yield_now. We can't use std::mpsc as the implementation itself
244 // may rely on thread locals.
246 // The basic state machine for an SPSC rendezvous channel is:
247 // FRESH -> THREAD1_WAITING -> MAIN_THREAD_RENDEZVOUS
248 // where the first transition is done by the “receiving” thread and the 2nd
249 // transition is done by the “sending” thread.
251 // We add an additional state `THREAD2_LAUNCHED` between `FRESH` and
252 // `THREAD1_WAITING` to block until all threads are actually running.
254 // A thread that joins on the “receiving” thread completion should never
255 // observe the channel in the `THREAD1_WAITING` state. If this does occur,
256 // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
257 // (This is equivalent to “sending” from an alternate producer thread.)
// NOTE(review): the `FRESH` constant referenced below is declared on an elided
// line (presumably `const FRESH: u8 = 0;`) — confirm against the full file.
259 const THREAD2_LAUNCHED: u8 = 1;
260 const THREAD1_WAITING: u8 = 2;
261 const MAIN_THREAD_RENDEZVOUS: u8 = 3;
262 const THREAD2_JOINED: u8 = 4;
263 static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);
// Reset so the test is repeatable across the several (elided) loop iterations.
266 SYNC_STATE.store(FRESH, Ordering::SeqCst);
// Thread 1: the “receiving” thread whose TLS destructor does the waiting.
268 let jh = thread::Builder::new()
269 .name("thread1".into())
273 impl Drop for TlDrop {
// The destructor announces THREAD1_WAITING, then spins until the main thread
// performs the rendezvous. Seeing THREAD2_JOINED here means thread 2 joined
// thread 1 while its TLS destructor was still running — the bug under test.
275 let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
278 THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
279 MAIN_THREAD_RENDEZVOUS => break,
280 THREAD2_JOINED => panic!(
281 "Thread 1 still running after thread 2 joined on thread 1"
283 v => unreachable!("sync state: {}", v),
285 sync_state = SYNC_STATE.load(Ordering::SeqCst);
291 static TL_DROP: TlDrop = TlDrop;
// Touch the key so the TlDrop destructor above is registered on thread 1.
294 TL_DROP.with(|_| {});
// Block thread 1 until thread 2 has actually launched.
297 match SYNC_STATE.load(Ordering::SeqCst) {
298 FRESH => thread::yield_now(),
299 THREAD2_LAUNCHED => break,
300 v => unreachable!("sync state: {}", v),
// Thread 2: announces itself, then (on elided lines, presumably after joining
// thread 1) poisons the state with THREAD2_JOINED.
306 let jh2 = thread::Builder::new()
307 .name("thread2".into())
309 assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
// After joining thread 1: observing THREAD1_WAITING here means the join
// returned before thread 1's TLS destructor finished.
311 match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
312 MAIN_THREAD_RENDEZVOUS => return,
313 THREAD2_LAUNCHED | THREAD1_WAITING => {
314 panic!("Thread 2 running after thread 1 join before main thread rendezvous")
316 v => unreachable!("sync state: {:?}", v),
// Main thread: performs THREAD1_WAITING -> MAIN_THREAD_RENDEZVOUS (the
// “send”), spinning while the channel isn't ready yet.
322 match SYNC_STATE.compare_exchange(
324 MAIN_THREAD_RENDEZVOUS,
329 Err(FRESH) => thread::yield_now(),
330 Err(THREAD2_LAUNCHED) => thread::yield_now(),
331 Err(THREAD2_JOINED) => {
332 panic!("Main thread rendezvous after thread 2 joined thread 1")
334 v => unreachable!("sync state: {:?}", v),