use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use ptr;
use mem;
use cell::Cell;
use num::NonZeroUsize;
use self::sync_bitset::*;

#[cfg(target_pointer_width="64")]
const USIZE_BITS: usize = 64;
const TLS_KEYS: usize = 128; // Same as POSIX minimum
const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;

static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
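
// `dup!((* .. *) value)` expands to an array literal: each leading `*` doubles
// the accumulated list of `value` tokens, so seven `*`s yield 2^7 = 128 copies,
// matching `TLS_KEYS`. It is used below to initialize the 128-element arrays.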
macro_rules! dup {
    ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
    (() $($val:tt)*) => ([$($val),*])
}
static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) ATOMIC_USIZE_INIT);
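
// Accessors for the current thread's TLS area pointer. They are only declared
// here; they are assumed to be implemented by the target's entry/ABI code.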
extern "C" {
    fn get_tls_ptr() -> *const u8;
    fn set_tls_ptr(tls: *const u8);
}

#[derive(Copy, Clone)]
pub struct Key(NonZeroUsize);
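
// A `Key` wraps the TLS slot index plus one in a `NonZeroUsize`;
// `to_index`/`from_index` below convert between the two representations.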
impl Key {
    fn to_index(self) -> usize {
        self.0.get() - 1
    }

    fn from_index(index: usize) -> Self {
        Key(NonZeroUsize::new(index + 1).unwrap())
    }

    pub fn as_usize(self) -> usize {
        self.0.get()
    }

    pub fn from_usize(index: usize) -> Self {
        Key(NonZeroUsize::new(index).unwrap())
    }
}

pub struct Tls {
    data: [Cell<*mut u8>; TLS_KEYS]
}

pub struct ActiveTls<'a> {
    tls: &'a Tls
}
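
// Handle to an activated `Tls` area. Dropping it runs the registered
// destructor for every non-null value stored in the area (see `Drop` below).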
impl<'a> Drop for ActiveTls<'a> {
    fn drop(&mut self) {
        let value_with_destructor = |key: usize| {
            let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
            unsafe { mem::transmute::<_,Option<unsafe extern fn(*mut u8)>>(ptr) }
                .map(|dtor| (&self.tls.data[key], dtor))
        };
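
        // Destructors may themselves install new TLS values, so keep sweeping
        // every in-use slot until a full pass sees only null values.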
        let mut any_non_null_dtor = true;
        while any_non_null_dtor {
            any_non_null_dtor = false;
            for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
                let value = value.replace(ptr::null_mut());
                if value != ptr::null_mut() {
                    any_non_null_dtor = true;
                    unsafe { dtor(value) }
                }
            }
        }
    }
}

impl Tls {
    pub fn new() -> Tls {
        Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
    }

    pub unsafe fn activate(&self) -> ActiveTls {
        set_tls_ptr(self as *const Tls as _);
        ActiveTls { tls: self }
    }

    pub unsafe fn activate_persistent(self: Box<Self>) {
        set_tls_ptr((&*self) as *const Tls as _);
        // Leak the box so the TLS area stays alive for the rest of the thread.
        mem::forget(self);
    }

    unsafe fn current<'a>() -> &'a Tls {
        &*(get_tls_ptr() as *const Tls)
    }
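
    // A destructor is stored as a raw fn pointer cast to `usize`, with 0
    // meaning "no destructor" (see `value_with_destructor` above).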
    pub fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
        let index = TLS_KEY_IN_USE.set().expect("TLS limit exceeded");
        TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
        Key::from_index(index)
    }

    pub fn set(key: Key, value: *mut u8) {
        let index = key.to_index();
        assert!(TLS_KEY_IN_USE.get(index));
        unsafe { Self::current() }.data[index].set(value);
    }

    pub fn get(key: Key) -> *mut u8 {
        let index = key.to_index();
        assert!(TLS_KEY_IN_USE.get(index));
        unsafe { Self::current() }.data[index].get()
    }

    pub fn destroy(key: Key) {
        TLS_KEY_IN_USE.clear(key.to_index());
    }
}
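
// Illustrative usage sketch (not part of the original code); it assumes a
// destructor `unsafe extern fn drop_value(ptr: *mut u8)` and a pointer
// `value_ptr` defined elsewhere:
//
//     let tls = Tls::new();
//     let active = unsafe { tls.activate() };   // publish this thread's TLS area
//     let key = Tls::create(Some(drop_value));  // reserve a slot
//     Tls::set(key, value_ptr);                 // store a per-thread pointer
//     assert_eq!(Tls::get(key), value_ptr);
//     drop(active);                             // runs `drop_value(value_ptr)`
//     Tls::destroy(key);                        // release the slot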

mod sync_bitset {
    use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
    use iter::{Enumerate, Peekable};
    use slice::Iter;
    use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};

    /// A bitset that can be used synchronously.
    pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);

    pub(super) const SYNC_BITSET_INIT: SyncBitset =
        SyncBitset([ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT]);
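
    // `TLS_KEYS_BITSET_SIZE` is 2 (128 keys / 64 bits per word), so the
    // initializer spells out both elements.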
    impl SyncBitset {
        pub fn get(&self, index: usize) -> bool {
            let (hi, lo) = Self::split(index);
            (self.0[hi].load(Ordering::Relaxed) & lo) != 0
        }

        pub fn iter(&self) -> SyncBitsetIter {
            SyncBitsetIter {
                iter: self.0.iter().enumerate().peekable(),
                elem_idx: 0,
            }
        }

        pub fn clear(&self, index: usize) {
            let (hi, lo) = Self::split(index);
            self.0[hi].fetch_and(!lo, Ordering::Relaxed);
        }

        /// Sets any unset bit. Not atomic. Returns `None` if all bits were
        /// observed to be set.
        pub fn set(&self) -> Option<usize> {
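            // Scan each word for a zero bit and try to claim it with a
            // compare-and-swap, retrying with the freshly observed value on
            // contention.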
            'elems: for (idx, elem) in self.0.iter().enumerate() {
                let mut current = elem.load(Ordering::Relaxed);
                loop {
                    // Every bit in this word is already set; try the next word.
                    if 0 == !current {
                        continue 'elems;
                    }
                    let trailing_ones = (!current).trailing_zeros() as usize;
                    match elem.compare_exchange(
                        current,
                        current | (1 << trailing_ones),
                        Ordering::AcqRel,
                        Ordering::Relaxed
                    ) {
                        Ok(_) => return Some(idx * USIZE_BITS + trailing_ones),
                        Err(previous) => current = previous,
                    }
                }
            }
            None
        }

        fn split(index: usize) -> (usize, usize) {
            (index / USIZE_BITS, 1 << (index % USIZE_BITS))
        }
    }
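
    // Iterator over the indices of all set bits. `iter` peeks at the current
    // word and `elem_idx` tracks the bit position to resume from within it.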
    pub(super) struct SyncBitsetIter<'a> {
        iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
        elem_idx: usize,
    }

    impl<'a> Iterator for SyncBitsetIter<'a> {
        type Item = usize;

        fn next(&mut self) -> Option<usize> {
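            // Mask off bits below `elem_idx` in the current word and yield the
            // next set bit; when the word is exhausted, advance the underlying
            // iterator and start over from bit 0.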
            self.iter.peek().cloned().and_then(|(idx, elem)| {
                let elem = elem.load(Ordering::Relaxed);
                let low_mask = (1 << self.elem_idx) - 1;
                let next = elem & !low_mask;
                let next_idx = next.trailing_zeros() as usize;
                self.elem_idx = next_idx + 1;
                if self.elem_idx >= 64 {
                    self.elem_idx = 0;
                    self.iter.next();
                }
                match next_idx {
                    // No set bit left in this word: recurse into the next word.
                    64 => self.next(),
                    _ => Some(idx * USIZE_BITS + next_idx),
                }
            })
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        fn test_data(bitset: [usize; 2], bit_indices: &[usize]) {
            let set = SyncBitset([AtomicUsize::new(bitset[0]), AtomicUsize::new(bitset[1])]);
            assert_eq!(set.iter().collect::<Vec<_>>(), bit_indices);
            for &i in bit_indices {
                assert!(set.get(i));
            }
        }

        #[test]
        fn iter() {
            test_data([0b0110_1001, 0], &[0, 3, 5, 6]);
            test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]);
            test_data([0, 0], &[]);
        }

        #[test]
        fn set_get_clear() {
            let set = SYNC_BITSET_INIT;
            let key = set.set().unwrap();
            assert!(set.get(key));
            set.clear(key);
            assert!(!set.get(key));
        }
    }
}