1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! A "once initialization" primitive
13 //! This primitive is meant to be used to run one-time initialization. An
14 //! example use case would be for initializing an FFI library.
16 // A "once" is a relatively simple primitive, and it's also typically provided
17 // by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
18 // primitives, however, tend to have surprising restrictions, such as the Unix
19 // one doesn't allow an argument to be passed to the function.
21 // As a result, we end up implementing it ourselves in the standard library.
22 // This also gives us the opportunity to optimize the implementation a bit which
23 // should help the fast path on call sites. Consequently, let's explain how this
24 // primitive works now!
26 // So to recap, the guarantees of a Once are that it will call the
27 // initialization closure at most once, and it will never return until the one
28 // that's running has finished running. This means that we need some form of
29 // blocking here while the custom callback is running at the very least.
30 // Additionally, we add on the restriction of **poisoning**. Whenever an
31 // initialization closure panics, the Once enters a "poisoned" state which means
32 // that all future calls will immediately panic as well.
34 // So to implement this, one might first reach for a `StaticMutex`, but those
35 // unfortunately need to be deallocated (e.g. call `destroy()`) to free memory
36 // on all OSes (some of the BSDs allocate memory for mutexes). It also gets a
37 // lot harder with poisoning to figure out when the mutex needs to be
38 // deallocated because it's not after the closure finishes, but after the first
39 // successful closure finishes.
41 // All in all, this is instead implemented with atomics and lock-free
42 // operations! Whee! Each `Once` has one word of atomic state, and this state is
43 // CAS'd on to determine what to do. There are four possible states of a `Once`:
45 // * Incomplete - no initialization has run yet, and no thread is currently
47 // * Poisoned - some thread has previously attempted to initialize the Once, but
48 // it panicked, so the Once is now poisoned. There are no other
49 // threads currently accessing this Once.
50 // * Running - some thread is currently attempting to run initialization. It may
51 // succeed, so all future threads need to wait for it to finish.
52 // Note that this state is accompanied with a payload, described
54 // * Complete - initialization has completed and all future calls should finish
57 // With 4 states we need 2 bits to encode this, and we use the remaining bits
58 // in the word we have allocated as a queue of threads waiting for the thread
59 // responsible for entering the RUNNING state. This queue is just a linked list
60 // of Waiter nodes which is monotonically increasing in size. Each node is
61 // allocated on the stack, and whenever the running closure finishes it will
62 // consume the entire queue and notify all waiters they should try again.
64 // You'll find a few more details in the implementation, but that's the gist of
70 use sync::atomic::{AtomicUsize, AtomicBool, Ordering};
71 use thread::{self, Thread};
73 /// A synchronization primitive which can be used to run a one-time global
74 /// initialization. Useful for one-time initialization for FFI or related
75 /// functionality. This type can only be constructed with the [`ONCE_INIT`]
76 /// value or the equivalent [`Once::new`] constructor.
78 /// [`ONCE_INIT`]: constant.ONCE_INIT.html
79 /// [`Once::new`]: struct.Once.html#method.new
84 /// use std::sync::Once;
86 /// static START: Once = Once::new();
88 /// START.call_once(|| {
89 /// // run initialization here
92 #[stable(feature = "rust1", since = "1.0.0")]
94 // This `state` word is actually an encoded version of just a pointer to a
95 // `Waiter`, so we add the `PhantomData` appropriately.
97 _marker: marker::PhantomData<*mut Waiter>,
100 // The `PhantomData` of a raw pointer removes these two auto traits, but we
101 // enforce both below in the implementation so this should be safe to add.
102 #[stable(feature = "rust1", since = "1.0.0")]
103 unsafe impl Sync for Once {}
104 #[stable(feature = "rust1", since = "1.0.0")]
105 unsafe impl Send for Once {}
107 /// State yielded to [`call_once_force`]’s closure parameter. The state can be
108 /// used to query the poison status of the [`Once`].
110 /// [`call_once_force`]: struct.Once.html#method.call_once_force
111 /// [`Once`]: struct.Once.html
112 #[unstable(feature = "once_poison", issue = "33577")]
114 pub struct OnceState {
118 /// Initialization value for static [`Once`] values.
120 /// [`Once`]: struct.Once.html
125 /// use std::sync::{Once, ONCE_INIT};
127 /// static START: Once = ONCE_INIT;
129 #[stable(feature = "rust1", since = "1.0.0")]
130 pub const ONCE_INIT: Once = Once::new();
// Four states that a Once can be in, encoded into the lower bits of `state` in
// the Once structure.
const INCOMPLETE: usize = 0x0; // no initialization has run yet
const POISONED: usize = 0x1;   // an initializer panicked; the Once is poisoned
const RUNNING: usize = 0x2;    // a thread is currently running the initializer
const COMPLETE: usize = 0x3;   // initialization finished successfully

// Mask to learn about the state. All other bits are the queue of waiters if
// this is in the RUNNING state.
const STATE_MASK: usize = 0x3;
143 // Representation of a node in the linked list of waiters in the RUNNING state.
145 thread: Option<Thread>,
146 signaled: AtomicBool,
150 // Helper struct used to clean up after a closure call with a `Drop`
151 // implementation to also run on panic.
158 /// Creates a new `Once` value.
159 #[stable(feature = "once_new", since = "1.2.0")]
160 pub const fn new() -> Once {
162 state: AtomicUsize::new(INCOMPLETE),
163 _marker: marker::PhantomData,
167 /// Performs an initialization routine once and only once. The given closure
168 /// will be executed if this is the first time `call_once` has been called,
169 /// and otherwise the routine will *not* be invoked.
171 /// This method will block the calling thread if another initialization
172 /// routine is currently running.
174 /// When this function returns, it is guaranteed that some initialization
175 /// has run and completed (it may not be the closure specified). It is also
176 /// guaranteed that any memory writes performed by the executed closure can
177 /// be reliably observed by other threads at this point (there is a
178 /// happens-before relation between the closure and code executing after the
181 /// If the given closure recursively invokes `call_once` on the same `Once`
182 /// instance the exact behavior is not specified, allowed outcomes are
183 /// a panic or a deadlock.
188 /// use std::sync::Once;
190 /// static mut VAL: usize = 0;
191 /// static INIT: Once = Once::new();
193 /// // Accessing a `static mut` is unsafe much of the time, but if we do so
194 /// // in a synchronized fashion (e.g. write once or read all) then we're
197 /// // This function will only call `expensive_computation` once, and will
198 /// // otherwise always return the value returned from the first invocation.
199 /// fn get_cached_val() -> usize {
201 /// INIT.call_once(|| {
202 /// VAL = expensive_computation();
208 /// fn expensive_computation() -> usize {
216 /// The closure `f` will only be executed once if this is called
217 /// concurrently amongst many threads. If that closure panics, however, then
218 /// it will *poison* this `Once` instance, causing all future invocations of
219 /// `call_once` to also panic.
221 /// This is similar to [poisoning with mutexes][poison].
223 /// [poison]: struct.Mutex.html#poisoning
224 #[stable(feature = "rust1", since = "1.0.0")]
225 pub fn call_once<F>(&self, f: F) where F: FnOnce() {
226 // Fast path, just see if we've completed initialization.
227 // An `Acquire` load is enough because that makes all the initialization
228 // operations visible to us. The cold path uses SeqCst consistently
229 // because the performance difference really does not matter there,
230 // and SeqCst minimizes the chances of something going wrong.
231 if self.state.load(Ordering::Acquire) == COMPLETE {
236 self.call_inner(false, &mut |_| f.take().unwrap()());
239 /// Performs the same function as [`call_once`] except ignores poisoning.
241 /// Unlike [`call_once`], if this `Once` has been poisoned (i.e. a previous
242 /// call to `call_once` or `call_once_force` caused a panic), calling
243 /// `call_once_force` will still invoke the closure `f` and will _not_
244 /// result in an immediate panic. If `f` panics, the `Once` will remain
245 /// in a poison state. If `f` does _not_ panic, the `Once` will no
246 /// longer be in a poison state and all future calls to `call_once` or
247 /// `call_once_force` will no-op.
249 /// The closure `f` is yielded a [`OnceState`] structure which can be used
250 /// to query the poison status of the `Once`.
252 /// [`call_once`]: struct.Once.html#method.call_once
253 /// [`OnceState`]: struct.OnceState.html
258 /// #![feature(once_poison)]
260 /// use std::sync::Once;
263 /// static INIT: Once = Once::new();
265 /// // poison the once
266 /// let handle = thread::spawn(|| {
267 /// INIT.call_once(|| panic!());
269 /// assert!(handle.join().is_err());
271 /// // poisoning propagates
272 /// let handle = thread::spawn(|| {
273 /// INIT.call_once(|| {});
275 /// assert!(handle.join().is_err());
277 /// // call_once_force will still run and reset the poisoned state
278 /// INIT.call_once_force(|state| {
279 /// assert!(state.poisoned());
282 /// // once any success happens, we stop propagating the poison
283 /// INIT.call_once(|| {});
285 #[unstable(feature = "once_poison", issue = "33577")]
286 pub fn call_once_force<F>(&self, f: F) where F: FnOnce(&OnceState) {
287 // same as above, just with a different parameter to `call_inner`.
288 // An `Acquire` load is enough because that makes all the initialization
289 // operations visible to us. The cold path uses SeqCst consistently
290 // because the performance difference really does not matter there,
291 // and SeqCst minimizes the chances of something going wrong.
292 if self.state.load(Ordering::Acquire) == COMPLETE {
297 self.call_inner(true, &mut |p| {
298 f.take().unwrap()(&OnceState { poisoned: p })
302 // This is a non-generic function to reduce the monomorphization cost of
303 // using `call_once` (this isn't exactly a trivial or small implementation).
305 // Additionally, this is tagged with `#[cold]` as it should indeed be cold
306 // and it helps let LLVM know that calls to this function should be off the
307 // fast path. Essentially, this should help generate more straight line code
310 // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
311 // currently no way to take an `FnOnce` and call it via virtual dispatch
312 // without some allocation overhead.
315 ignore_poisoning: bool,
316 init: &mut dyn FnMut(bool)) {
317 let mut state = self.state.load(Ordering::SeqCst);
321 // If we're complete, then there's nothing to do, we just
322 // jettison out as we shouldn't run the closure.
325 // If we're poisoned and we're not in a mode to ignore
326 // poisoning, then we panic here to propagate the poison.
327 POISONED if !ignore_poisoning => {
328 panic!("Once instance has previously been poisoned");
331 // Otherwise if we see a poisoned or otherwise incomplete state
332 // we will attempt to move ourselves into the RUNNING state. If
333 // we succeed, then the queue of waiters starts at null (all 0
337 let old = self.state.compare_and_swap(state, RUNNING,
344 // Run the initialization routine, letting it know if we're
345 // poisoned or not. The `Finish` struct is then dropped, and
346 // the `Drop` implementation here is responsible for waking
347 // up other waiters both in the normal return and panicking
349 let mut complete = Finish {
353 init(state == POISONED);
354 complete.panicked = false;
358 // All other values we find should correspond to the RUNNING
359 // state with an encoded waiter list in the more significant
360 // bits. We attempt to enqueue ourselves by moving us to the
361 // head of the list and bail out if we ever see a state that's
364 assert!(state & STATE_MASK == RUNNING);
365 let mut node = Waiter {
366 thread: Some(thread::current()),
367 signaled: AtomicBool::new(false),
368 next: ptr::null_mut(),
370 let me = &mut node as *mut Waiter as usize;
371 assert!(me & STATE_MASK == 0);
373 while state & STATE_MASK == RUNNING {
374 node.next = (state & !STATE_MASK) as *mut Waiter;
375 let old = self.state.compare_and_swap(state,
383 // Once we've enqueued ourselves, wait in a loop.
384 // Afterwards reload the state and continue with what we
385 // were doing from before.
386 while !node.signaled.load(Ordering::SeqCst) {
389 state = self.state.load(Ordering::SeqCst);
398 #[stable(feature = "std_debug", since = "1.16.0")]
399 impl fmt::Debug for Once {
400 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
405 impl<'a> Drop for Finish<'a> {
407 // Swap out our state with however we finished. We should only ever see
408 // an old state which was RUNNING.
409 let queue = if self.panicked {
410 self.me.state.swap(POISONED, Ordering::SeqCst)
412 self.me.state.swap(COMPLETE, Ordering::SeqCst)
414 assert_eq!(queue & STATE_MASK, RUNNING);
416 // Decode the RUNNING to a list of waiters, then walk that entire list
417 // and wake them up. Note that it is crucial that after we store `true`
418 // in the node it can be free'd! As a result we load the `thread` to
419 // signal ahead of time and then unpark it after the store.
421 let mut queue = (queue & !STATE_MASK) as *mut Waiter;
422 while !queue.is_null() {
423 let next = (*queue).next;
424 let thread = (*queue).thread.take().unwrap();
425 (*queue).signaled.store(true, Ordering::SeqCst);
434 /// Returns whether the associated [`Once`] was poisoned prior to the
435 /// invocation of the closure passed to [`call_once_force`].
437 /// [`call_once_force`]: struct.Once.html#method.call_once_force
438 /// [`Once`]: struct.Once.html
442 /// A poisoned `Once`:
445 /// #![feature(once_poison)]
447 /// use std::sync::Once;
450 /// static INIT: Once = Once::new();
452 /// // poison the once
453 /// let handle = thread::spawn(|| {
454 /// INIT.call_once(|| panic!());
456 /// assert!(handle.join().is_err());
458 /// INIT.call_once_force(|state| {
459 /// assert!(state.poisoned());
463 /// An unpoisoned `Once`:
466 /// #![feature(once_poison)]
468 /// use std::sync::Once;
470 /// static INIT: Once = Once::new();
472 /// INIT.call_once_force(|state| {
473 /// assert!(!state.poisoned());
475 #[unstable(feature = "once_poison", issue = "33577")]
476 pub fn poisoned(&self) -> bool {
481 #[cfg(all(test, not(target_os = "emscripten")))]
484 use sync::mpsc::channel;
490 static O: Once = Once::new();
492 O.call_once(|| a += 1);
494 O.call_once(|| a += 1);
500 static O: Once = Once::new();
501 static mut RUN: bool = false;
503 let (tx, rx) = channel();
506 thread::spawn(move|| {
507 for _ in 0..4 { thread::yield_now() }
515 tx.send(()).unwrap();
534 static O: Once = Once::new();
537 let t = panic::catch_unwind(|| {
538 O.call_once(|| panic!());
542 // poisoning propagates
543 let t = panic::catch_unwind(|| {
548 // we can subvert poisoning, however
549 let mut called = false;
550 O.call_once_force(|p| {
552 assert!(p.poisoned())
556 // once any success happens, we stop propagating the poison
561 fn wait_for_force_to_finish() {
562 static O: Once = Once::new();
565 let t = panic::catch_unwind(|| {
566 O.call_once(|| panic!());
570 // make sure someone's waiting inside the once via a force
571 let (tx1, rx1) = channel();
572 let (tx2, rx2) = channel();
573 let t1 = thread::spawn(move || {
574 O.call_once_force(|p| {
575 assert!(p.poisoned());
576 tx1.send(()).unwrap();
583 // put another waiter on the once
584 let t2 = thread::spawn(|| {
585 let mut called = false;
592 tx2.send(()).unwrap();
594 assert!(t1.join().is_ok());
595 assert!(t2.join().is_ok());