1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! A "once initialization" primitive
13 //! This primitive is meant to be used to run one-time initialization. An
14 //! example use case would be for initializing an FFI library.
19 use sync::atomic::{AtomicIsize, Ordering};
20 use sync::StaticMutex;
22 /// A synchronization primitive which can be used to run a one-time global
23 /// initialization. Useful for one-time initialization for FFI or related
24 /// functionality. This type can only be constructed with the `ONCE_INIT` value.
30 /// use std::sync::{Once, ONCE_INIT};
32 /// static START: Once = ONCE_INIT;
34 /// START.call_once(|| {
35 /// // run initialization here
38 #[stable(feature = "rust1", since = "1.0.0")]
// NOTE(review): the `pub struct Once { ... }` header and the `mutex`/`cnt`
// fields that `call_once` uses below are elided from this chunk; only the
// trailing field is visible here — confirm against the full file.
//
// Count of threads that entered the mutex-protected section before
// initialization was observed complete. It is decremented on the way out of
// `call_once`, and the thread that performs the final decrement destroys the
// internal mutex so its allocation is not leaked.
42 lock_cnt: AtomicIsize,
45 /// Initialization value for static `Once` values.
// `Once::new` is a `const fn` (see below), which is what allows this constant
// to be used as the initializer of a `static`, e.g.
// `static START: Once = ONCE_INIT;` as shown in the type-level docs.
46 #[stable(feature = "rust1", since = "1.0.0")]
47 pub const ONCE_INIT: Once = Once::new();
50 /// Creates a new `Once` value.
// NOTE(review): the `Once { ... }` constructor expression's opening line and
// the closing braces are elided from this chunk.
51 #[unstable(feature = "once_new")]
52 pub const fn new() -> Once {
54 mutex: StaticMutex::new(),
// Both counters start at zero. `call_once` treats a *negative* `cnt` as
// "initialization already completed" (it is pinned at a very negative value
// once the routine finishes), so a fresh `Once` has not yet run.
55 cnt: AtomicIsize::new(0),
56 lock_cnt: AtomicIsize::new(0),
60 /// Performs an initialization routine once and only once. The given closure
61 /// will be executed if this is the first time `call_once` has been called,
62 /// and otherwise the routine will *not* be invoked.
64 /// This method will block the calling thread if another initialization
65 /// routine is currently running.
67 /// When this function returns, it is guaranteed that some initialization
68 /// has run and completed (it may not be the closure specified). It is also
69 /// guaranteed that any memory writes performed by the executed closure can
70 /// be reliably observed by other threads at this point (there is a
71 /// happens-before relation between the closure and code executing after the
/// `call_once` invocation).
73 #[stable(feature = "rust1", since = "1.0.0")]
74 pub fn call_once<F>(&'static self, f: F) where F: FnOnce() {
75 // Optimize common path: load is much cheaper than fetch_add.
76 if self.cnt.load(Ordering::SeqCst) < 0 {
// NOTE(review): the body of this fast-path check (presumably a bare
// `return` plus its closing brace) is elided from this chunk — a negative
// count means initialization has already completed. Confirm against the
// full file.
80 // Implementation-wise, this would seem like a fairly trivial primitive.
81 // The stickler part is where our mutexes currently require an
82 // allocation, and usage of a `Once` shouldn't leak this allocation.
84 // This means that there must be a deterministic destroyer of the mutex
85 // contained within (because it's not needed after the initialization
// routine has run).
88 // The general scheme here is to gate all future threads once
89 // initialization has completed with a "very negative" count, and to
90 // allow through threads to lock the mutex if they see a non negative
91 // count. For all threads grabbing the mutex, exactly one of them should
92 // be responsible for unlocking the mutex, and this should only be done
93 // once everyone else is done with the mutex.
95 // This atomicity is achieved by swapping a very negative value into the
96 // shared count when the initialization routine has completed. This will
97 // read the number of threads which will at some point attempt to
98 // acquire the mutex. This count is then squirreled away in a separate
99 // variable, and the last person on the way out of the mutex is then
100 // responsible for destroying the mutex.
102 // It is crucial that the negative value is swapped in *after* the
103 // initialization routine has completed because otherwise new threads
104 // calling `call_once` will return immediately before the initialization
// routine has actually completed.
//
// Register this thread as one of the contenders for the mutex; `prev` is
// the count observed before the increment.
107 let prev = self.cnt.fetch_add(1, Ordering::SeqCst);
// NOTE(review): the `if prev < 0 { ... return }` guard that surrounds the
// store below is elided from this chunk — re-pinning `cnt` at isize::MIN
// here only makes sense on the "already initialized" path. Confirm against
// the full file.
109 // Make sure we never overflow, we'll never have isize::MIN
110 // simultaneous calls to `call_once` to make this value go back to 0
111 self.cnt.store(isize::MIN, Ordering::SeqCst);
115 // If the count is negative, then someone else finished the job,
116 // otherwise we run the job and record how many people will try to grab
// the mutex.
118 let guard = self.mutex.lock();
119 if self.cnt.load(Ordering::SeqCst) > 0 {
// NOTE(review): the invocation of the user closure (`f()`) appears to be
// elided from this chunk at this point — confirm against the full file.
//
// Swapping in the gate value *after* the routine ran (see the comment
// above) also tells us how many threads incremented `cnt` and will
// therefore pass through the mutex; stash that for teardown accounting.
121 let prev = self.cnt.swap(isize::MIN, Ordering::SeqCst);
122 self.lock_cnt.store(prev, Ordering::SeqCst);
126 // Last one out cleans up after everyone else, no leaks!
127 if self.lock_cnt.fetch_add(-1, Ordering::SeqCst) == 1 {
// Every contending thread has decremented `lock_cnt` by now, so no
// other thread can touch the mutex again and destroying it here
// cannot race (per the scheme described above).
128 unsafe { self.mutex.destroy() }
139 use sync::mpsc::channel;
// NOTE(review): the `mod tests` scaffolding, `#[test]` attributes, fn headers
// and assertions are elided from this chunk; only fragments of two tests are
// visible below, and the second runs past the end of this chunk.
143 static O: Once = Once::new();
// Calling `call_once` twice with the same increment: the closure must run at
// most once (the elided assertions presumably check the counter equals 1 —
// confirm against the full file).
145 O.call_once(|| a += 1);
147 O.call_once(|| a += 1);
153 static O: Once = Once::new();
154 static mut run: bool = false;
156 let (tx, rx) = channel();
159 thread::spawn(move|| {
// Yield a few times so the spawned threads race into `call_once` together.
160 for _ in 0..4 { thread::yield_now() }
168 tx.send(()).unwrap();