use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::ptr;
use crate::mem;
use crate::cell::Cell;
use crate::num::NonZeroUsize;
use self::sync_bitset::*;

#[cfg(target_pointer_width = "64")]
const USIZE_BITS: usize = 64;
const TLS_KEYS: usize = 128; // Same as POSIX minimum
// Number of `usize` words needed to hold one bit per TLS key (ceiling division).
const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;

#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;

macro_rules! dup {
    ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
    (() $($val:tt)*) => ([$($val),*])
}
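
// Each `*` in the first token tree doubles the number of copies of the value
// expression: `dup!((* *) (0))` expands to `[0, 0, 0, 0]`, and the seven `*`s
// used below produce 2^7 = 128 copies, matching `TLS_KEYS`.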
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));

extern "C" {
    fn get_tls_ptr() -> *const u8;
    fn set_tls_ptr(tls: *const u8);
}

#[derive(Copy, Clone)]
pub struct Key(NonZeroUsize);

impl Key {
    fn to_index(self) -> usize {
        self.0.get() - 1
    }

    fn from_index(index: usize) -> Self {
        Key(NonZeroUsize::new(index + 1).unwrap())
    }

    pub fn as_usize(self) -> usize {
        self.0.get()
    }

    pub fn from_usize(index: usize) -> Self {
        Key(NonZeroUsize::new(index).unwrap())
    }
}

pub struct Tls {
    data: [Cell<*mut u8>; TLS_KEYS],
}

pub struct ActiveTls<'a> {
    tls: &'a Tls,
}

impl<'a> Drop for ActiveTls<'a> {
    fn drop(&mut self) {
        let value_with_destructor = |key: usize| {
            let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
            unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
                .map(|dtor| (&self.tls.data[key], dtor))
        };

        // Keep running destructors until every slot with a registered destructor
        // has been observed as null; a destructor may itself store new TLS values.
        let mut any_non_null_dtor = true;
        while any_non_null_dtor {
            any_non_null_dtor = false;
            for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
                let value = value.replace(ptr::null_mut());
                if !value.is_null() {
                    any_non_null_dtor = true;
                    unsafe { dtor(value) }
                }
            }
        }
    }
}

impl Tls {
    pub fn new() -> Tls {
        Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
    }

    pub unsafe fn activate(&self) -> ActiveTls {
        set_tls_ptr(self as *const Tls as _);
        ActiveTls { tls: self }
    }

    pub unsafe fn activate_persistent(self: Box<Self>) {
        set_tls_ptr((&*self) as *const Tls as _);
        // Leak the box so the TLS area stays live for the rest of the thread.
        mem::forget(self);
    }

    unsafe fn current<'a>() -> &'a Tls {
        &*(get_tls_ptr() as *const Tls)
    }

    pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
        let index = TLS_KEY_IN_USE.set().expect("TLS limit exceeded");
        TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
        Key::from_index(index)
    }

    pub fn set(key: Key, value: *mut u8) {
        let index = key.to_index();
        assert!(TLS_KEY_IN_USE.get(index));
        unsafe { Self::current() }.data[index].set(value);
    }

    pub fn get(key: Key) -> *mut u8 {
        let index = key.to_index();
        assert!(TLS_KEY_IN_USE.get(index));
        unsafe { Self::current() }.data[index].get()
    }

    pub fn destroy(key: Key) {
        TLS_KEY_IN_USE.clear(key.to_index());
        // Clearing TLS_DESTRUCTOR[key] is unnecessary: the key is no longer
        // marked in use, so it must not be passed to `set`/`get` again.
    }
}
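
// Usage sketch (not part of the original source): how the pieces above fit
// together inside an enclave entrypoint. `Tls::set`/`Tls::get` only work after
// `activate`, because they read the thread pointer installed by `set_tls_ptr`.
// `my_dtor` and `value_ptr` below are hypothetical names.
//
//     let tls = Tls::new();
//     let _active = unsafe { tls.activate() };  // installs the TLS pointer
//     let key = Tls::create(Some(my_dtor));
//     Tls::set(key, value_ptr);
//     assert_eq!(Tls::get(key), value_ptr);
//     Tls::destroy(key);
//     // dropping `_active` runs registered destructors for non-null slots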

mod sync_bitset {
    use crate::sync::atomic::{AtomicUsize, Ordering};
    use crate::iter::{Enumerate, Peekable};
    use crate::slice::Iter;
    use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};

    /// A bitset that can be used synchronously.
    pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);

    pub(super) const SYNC_BITSET_INIT: SyncBitset =
        SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]);

    impl SyncBitset {
        pub fn get(&self, index: usize) -> bool {
            let (hi, lo) = Self::split(index);
            (self.0[hi].load(Ordering::Relaxed) & lo) != 0
        }

        /// Not atomic.
        pub fn iter(&self) -> SyncBitsetIter {
            SyncBitsetIter {
                iter: self.0.iter().enumerate().peekable(),
                elem_idx: 0,
            }
        }

        pub fn clear(&self, index: usize) {
            let (hi, lo) = Self::split(index);
            self.0[hi].fetch_and(!lo, Ordering::Relaxed);
        }

        /// Sets any unset bit. Not atomic. Returns `None` if all bits were
        /// observed to be set.
        pub fn set(&self) -> Option<usize> {
            'elems: for (idx, elem) in self.0.iter().enumerate() {
                let mut current = elem.load(Ordering::Relaxed);
                loop {
                    if current == !0 {
                        continue 'elems;
                    }
                    // Index of the lowest unset bit: the number of trailing ones in `current`.
                    let trailing_ones = (!current).trailing_zeros() as usize;
                    match elem.compare_exchange(
                        current,
                        current | (1 << trailing_ones),
                        Ordering::AcqRel,
                        Ordering::Relaxed,
                    ) {
                        Ok(_) => return Some(idx * USIZE_BITS + trailing_ones),
                        Err(previous) => current = previous,
                    }
                }
            }
            None
        }

        fn split(index: usize) -> (usize, usize) {
            // Maps a bit index to (word index, single-bit mask within that word),
            // e.g. index 70 -> (1, 1 << 6).
            (index / USIZE_BITS, 1 << (index % USIZE_BITS))
        }
    }

    pub(super) struct SyncBitsetIter<'a> {
        iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
        elem_idx: usize,
    }

    impl<'a> Iterator for SyncBitsetIter<'a> {
        type Item = usize;

        fn next(&mut self) -> Option<usize> {
            self.iter.peek().cloned().and_then(|(idx, elem)| {
                let elem = elem.load(Ordering::Relaxed);
                // Mask off the bits already yielded from this word.
                let low_mask = (1 << self.elem_idx) - 1;
                let next = elem & !low_mask;
                let next_idx = next.trailing_zeros() as usize;
                self.elem_idx = next_idx + 1;
                if self.elem_idx >= 64 {
                    self.elem_idx = 0;
                    self.iter.next();
                }
                match next_idx {
                    // No set bit left in this word; continue with the next one.
                    64 => self.next(),
                    _ => Some(idx * USIZE_BITS + next_idx),
                }
            })
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        fn test_data(bitset: [usize; 2], bit_indices: &[usize]) {
            let set = SyncBitset([AtomicUsize::new(bitset[0]), AtomicUsize::new(bitset[1])]);
            assert_eq!(set.iter().collect::<Vec<_>>(), bit_indices);
            for &i in bit_indices {
                assert!(set.get(i));
            }
        }

        #[test]
        fn iter() {
            test_data([0b0110_1001, 0], &[0, 3, 5, 6]);
            test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]);
            test_data([0, 0], &[]);
        }
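
        // Added check (not in the original suite): exercises the documented
        // contract of `SyncBitset::set`, which hands out unset bits until every
        // bit has been observed as set and then returns `None`.
        #[test]
        fn set_full_returns_none() {
            let set = SYNC_BITSET_INIT;
            // SYNC_BITSET_INIT spans 2 * 64 = 128 bits; claim them all.
            for _ in 0..128 {
                let key = set.set().expect("bitset reported full too early");
                assert!(set.get(key));
            }
            assert_eq!(set.set(), None);
        }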

        #[test]
        fn set_get_clear() {
            let set = SYNC_BITSET_INIT;
            let key = set.set().unwrap();
            assert!(set.get(key));
            set.clear(key);
            assert!(!set.get(key));
        }
    }
}