From: Jethro Beekman
Date: Tue, 28 Aug 2018 04:40:08 +0000 (-0700)
Subject: SGX target: add thread local storage
X-Git-Url: https://git.lizzy.rs/?a=commitdiff_plain;h=39f9751716986b498bfb2b14b481751a4256957f;p=rust.git

SGX target: add thread local storage
---

diff --git a/src/libstd/sys/sgx/abi/mod.rs b/src/libstd/sys/sgx/abi/mod.rs
index cade96e3f52..99ea7a381f3 100644
--- a/src/libstd/sys/sgx/abi/mod.rs
+++ b/src/libstd/sys/sgx/abi/mod.rs
@@ -17,6 +17,8 @@
 pub(super) mod panic;
 
 // library features
+pub mod thread;
+pub mod tls;
 #[macro_use]
 mod usercalls;
 
@@ -59,6 +61,10 @@
 #[no_mangle]
 #[allow(unreachable_code)]
 extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64) {
+    // FIXME: how to support TLS in library mode?
+    let tls = Box::new(tls::Tls::new());
+    let _tls_guard = unsafe { tls.activate() };
+
     if secondary {
         unimplemented!("thread entrypoint");
diff --git a/src/libstd/sys/sgx/abi/thread.rs b/src/libstd/sys/sgx/abi/thread.rs
new file mode 100644
index 00000000000..4640b812fea
--- /dev/null
+++ b/src/libstd/sys/sgx/abi/thread.rs
@@ -0,0 +1,20 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use fortanix_sgx_abi::Tcs;
+
+/// Get the ID for the current thread. The ID is guaranteed to be unique among
+/// all currently running threads in the enclave, and it is guaranteed to be
+/// constant for the lifetime of the thread. More specifically for SGX, there
+/// is a one-to-one correspondence of the ID to the address of the TCS.
+pub fn current() -> Tcs {
+    extern "C" { fn get_tcs_addr() -> Tcs; }
+    unsafe { get_tcs_addr() }
+}
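
The change to `entry` above gives every enclave entrypoint its own TLS area: a Tls is boxed, activated for the calling thread, and kept alive by the returned guard until the entrypoint returns. A minimal sketch of that lifetime pattern (not part of the patch; `entry_like` is a made-up name, written as if it sat inside abi/mod.rs where the new tls module and Box are in scope):

    fn entry_like() {
        let tls = Box::new(tls::Tls::new());
        // activate() calls set_tls_ptr(), so get_tls_ptr() now resolves to *tls.
        let _tls_guard = unsafe { tls.activate() };
        // ... enclave code runs here, reaching this Tls through Tls::current() ...
    } // locals drop in reverse order: the ActiveTls guard runs the per-key
      // destructors in its Drop impl, then the Box<Tls> it borrows is freed
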
diff --git a/src/libstd/sys/sgx/abi/tls.rs b/src/libstd/sys/sgx/abi/tls.rs
new file mode 100644
index 00000000000..ab7822182a5
--- /dev/null
+++ b/src/libstd/sys/sgx/abi/tls.rs
@@ -0,0 +1,246 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use ptr;
+use mem;
+use cell::Cell;
+use num::NonZeroUsize;
+use self::sync_bitset::*;
+
+#[cfg(target_pointer_width="64")]
+const USIZE_BITS: usize = 64;
+const TLS_KEYS: usize = 128; // Same as POSIX minimum
+const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
+
+static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
+macro_rules! dup {
+    ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
+    (() $($val:tt)*) => ([$($val),*])
+}
+static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) ATOMIC_USIZE_INIT);
+
+extern "C" {
+    fn get_tls_ptr() -> *const u8;
+    fn set_tls_ptr(tls: *const u8);
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct Key(NonZeroUsize);
+
+impl Key {
+    fn to_index(self) -> usize {
+        self.0.get() - 1
+    }
+
+    fn from_index(index: usize) -> Self {
+        Key(NonZeroUsize::new(index + 1).unwrap())
+    }
+
+    pub fn as_usize(self) -> usize {
+        self.0.get()
+    }
+
+    pub fn from_usize(index: usize) -> Self {
+        Key(NonZeroUsize::new(index).unwrap())
+    }
+}
+
+#[repr(C)]
+pub struct Tls {
+    data: [Cell<*mut u8>; TLS_KEYS]
+}
+
+pub struct ActiveTls<'a> {
+    tls: &'a Tls
+}
+
+impl<'a> Drop for ActiveTls<'a> {
+    fn drop(&mut self) {
+        let value_with_destructor = |key: usize| {
+            let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
+            unsafe { mem::transmute::<_,Option<unsafe extern fn(*mut u8)>>(ptr) }
+                .map(|dtor| (&self.tls.data[key], dtor))
+        };
+
+        let mut any_non_null_dtor = true;
+        while any_non_null_dtor {
+            any_non_null_dtor = false;
+            for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
+                let value = value.replace(ptr::null_mut());
+                if value != ptr::null_mut() {
+                    any_non_null_dtor = true;
+                    unsafe { dtor(value) }
+                }
+            }
+        }
+    }
+}
+
+impl Tls {
+    pub fn new() -> Tls {
+        Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
+    }
+
+    pub unsafe fn activate(&self) -> ActiveTls {
+        set_tls_ptr(self as *const Tls as _);
+        ActiveTls { tls: self }
+    }
+
+    #[allow(unused)]
+    pub unsafe fn activate_persistent(self: Box<Self>) {
+        set_tls_ptr((&*self) as *const Tls as _);
+        mem::forget(self);
+    }
+
+    unsafe fn current<'a>() -> &'a Tls {
+        &*(get_tls_ptr() as *const Tls)
+    }
+
+    pub fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
+        let index = TLS_KEY_IN_USE.set().expect("TLS limit exceeded");
+        TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
+        Key::from_index(index)
+    }
+
+    pub fn set(key: Key, value: *mut u8) {
+        let index = key.to_index();
+        assert!(TLS_KEY_IN_USE.get(index));
+        unsafe { Self::current() }.data[index].set(value);
+    }
+
+    pub fn get(key: Key) -> *mut u8 {
+        let index = key.to_index();
+        assert!(TLS_KEY_IN_USE.get(index));
+        unsafe { Self::current() }.data[index].get()
+    }
+
+    pub fn destroy(key: Key) {
+        TLS_KEY_IN_USE.clear(key.to_index());
+    }
+}
+
+mod sync_bitset {
+    use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use iter::{Enumerate, Peekable};
+    use slice::Iter;
+    use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS};
+
+    /// A bitset that can be used synchronously.
+    pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]);
+
+    pub(super) const SYNC_BITSET_INIT: SyncBitset =
+        SyncBitset([ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT]);
+
+    impl SyncBitset {
+        pub fn get(&self, index: usize) -> bool {
+            let (hi, lo) = Self::split(index);
+            (self.0[hi].load(Ordering::Relaxed) & lo) != 0
+        }
+
+        /// Not atomic.
+        pub fn iter(&self) -> SyncBitsetIter {
+            SyncBitsetIter {
+                iter: self.0.iter().enumerate().peekable(),
+                elem_idx: 0,
+            }
+        }
+
+        pub fn clear(&self, index: usize) {
+            let (hi, lo) = Self::split(index);
+            self.0[hi].fetch_and(!lo, Ordering::Relaxed);
+        }
+
+        /// Set any unset bit. Not atomic. Returns `None` if all bits were
+        /// observed to be set.
+        pub fn set(&self) -> Option<usize> {
+            'elems: for (idx, elem) in self.0.iter().enumerate() {
+                let mut current = elem.load(Ordering::Relaxed);
+                loop {
+                    if 0 == !current {
+                        continue 'elems;
+                    }
+                    let trailing_ones = (!current).trailing_zeros() as usize;
+                    match elem.compare_exchange(
+                        current,
+                        current | (1 << trailing_ones),
+                        Ordering::AcqRel,
+                        Ordering::Relaxed
+                    ) {
+                        Ok(_) => return Some(idx * USIZE_BITS + trailing_ones),
+                        Err(previous) => current = previous,
+                    }
+                }
+            }
+            None
+        }
+
+        fn split(index: usize) -> (usize, usize) {
+            (index / USIZE_BITS, 1 << (index % USIZE_BITS))
+        }
+    }
+
+    pub(super) struct SyncBitsetIter<'a> {
+        iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>,
+        elem_idx: usize,
+    }
+
+    impl<'a> Iterator for SyncBitsetIter<'a> {
+        type Item = usize;
+
+        fn next(&mut self) -> Option<usize> {
+            self.iter.peek().cloned().and_then(|(idx, elem)| {
+                let elem = elem.load(Ordering::Relaxed);
+                let low_mask = (1 << self.elem_idx) - 1;
+                let next = elem & !low_mask;
+                let next_idx = next.trailing_zeros() as usize;
+                self.elem_idx = next_idx + 1;
+                if self.elem_idx >= 64 {
+                    self.elem_idx = 0;
+                    self.iter.next();
+                }
+                match next_idx {
+                    64 => self.next(),
+                    _ => Some(idx * USIZE_BITS + next_idx),
+                }
+            })
+        }
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use super::*;
+
+        fn test_data(bitset: [usize; 2], bit_indices: &[usize]) {
+            let set = SyncBitset([AtomicUsize::new(bitset[0]), AtomicUsize::new(bitset[1])]);
+            assert_eq!(set.iter().collect::<Vec<usize>>(), bit_indices);
+            for &i in bit_indices {
+                assert!(set.get(i));
+            }
+        }
+
+        #[test]
+        fn iter() {
+            test_data([0b0110_1001, 0], &[0, 3, 5, 6]);
+            test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]);
+            test_data([0, 0], &[]);
+        }
+
+        #[test]
+        fn set_get_clear() {
+            let set = SYNC_BITSET_INIT;
+            let key = set.set().unwrap();
+            assert!(set.get(key));
+            set.clear(key);
+            assert!(!set.get(key));
+        }
+    }
+}
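
Two pieces of tls.rs above deserve a worked example. The dup! macro doubles its value tokens once per leading `*`, so the seven stars expand to 2^7 = 128 copies, matching TLS_KEYS; and SyncBitset::set finds the lowest clear bit of a word by taking trailing_zeros of the inverted value before claiming it with compare_exchange. A standalone sketch (ordinary Rust, not part of the patch; main, zeros and word are illustrative names):

    macro_rules! dup {
        ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
        (() $($val:tt)*) => ([$($val),*])
    }

    fn main() {
        // Each `*` doubles the value list: two stars give 2^2 = 4 copies.
        let zeros: [u8; 4] = dup!((* *) 0u8);
        assert_eq!(zeros, [0u8; 4]);

        // The free-slot search used by SyncBitset::set: invert the word and
        // take the position of its lowest set bit.
        let word: usize = 0b0111;                        // slots 0, 1, 2 in use
        let free_bit = (!word).trailing_zeros() as usize;
        assert_eq!(free_bit, 3);                         // slot 3 is the first free one
    }
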
diff --git a/src/libstd/sys/sgx/thread_local.rs b/src/libstd/sys/sgx/thread_local.rs
index 2126e0a853e..3b628bae4fb 100644
--- a/src/libstd/sys/sgx/thread_local.rs
+++ b/src/libstd/sys/sgx/thread_local.rs
@@ -8,40 +8,28 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use boxed::Box;
-use ptr;
+use super::abi::tls::{Tls, Key as AbiKey};
 
 pub type Key = usize;
 
-struct Allocated {
-    value: *mut u8,
-    dtor: Option<unsafe extern fn(*mut u8)>,
-}
-
 #[inline]
 pub unsafe fn create(dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
-    Box::into_raw(Box::new(Allocated {
-        value: ptr::null_mut(),
-        dtor,
-    })) as usize
+    Tls::create(dtor).as_usize()
 }
 
 #[inline]
 pub unsafe fn set(key: Key, value: *mut u8) {
-    (*(key as *mut Allocated)).value = value;
+    Tls::set(AbiKey::from_usize(key), value)
 }
 
 #[inline]
 pub unsafe fn get(key: Key) -> *mut u8 {
-    (*(key as *mut Allocated)).value
+    Tls::get(AbiKey::from_usize(key))
 }
 
 #[inline]
 pub unsafe fn destroy(key: Key) {
-    let key = Box::from_raw(key as *mut Allocated);
-    if let Some(f) = key.dtor {
-        f(key.value);
-    }
+    Tls::destroy(AbiKey::from_usize(key))
 }
 
 #[inline]
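
For orientation, this is how the generic thread_local shims above end up exercising the new machinery; a sketch written as if it sat next to the functions in sys/sgx/thread_local.rs (demo, drop_u32 and the u32 payload are invented for illustration, and the thread is assumed to have an activated Tls, as entry in abi/mod.rs arranges):

    use boxed::Box;

    unsafe extern fn drop_u32(ptr: *mut u8) {
        drop(Box::from_raw(ptr as *mut u32));
    }

    unsafe fn demo() {
        // Claims one of the 128 key slots and records drop_u32 in TLS_DESTRUCTOR.
        let key = create(Some(drop_u32));

        // The value lands in the calling thread's Tls data array.
        set(key, Box::into_raw(Box::new(7u32)) as *mut u8);
        assert_eq!(*(get(key) as *const u32), 7);

        // destroy() only releases the slot for reuse; destructors for values still
        // held under live keys run when the thread's ActiveTls guard is dropped,
        // so free the value explicitly before handing the key back.
        drop_u32(get(key));
        destroy(key);
    }
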