1 //! Server-side handles and storage for per-handle data.
use std::collections::{BTreeMap, HashMap};
use std::hash::Hash;
use std::num::NonZeroU32;
use std::ops::{Index, IndexMut};
use std::sync::atomic::{AtomicUsize, Ordering};
// A server-side handle is a non-zero 32-bit id. Using `NonZeroU32` lets
// `Option<Handle>` occupy the same size as `Handle` (niche optimization),
// and reserves 0 as a never-valid handle value.
pub(super) type Handle = NonZeroU32;
11 pub(super) struct OwnedStore<T: 'static> {
12 counter: &'static AtomicUsize,
13 data: BTreeMap<Handle, T>,
16 impl<T> OwnedStore<T> {
17 pub(super) fn new(counter: &'static AtomicUsize) -> Self {
18 // Ensure the handle counter isn't 0, which would panic later,
19 // when `NonZeroU32::new` (aka `Handle::new`) is called in `alloc`.
20 assert_ne!(counter.load(Ordering::SeqCst), 0);
22 OwnedStore { counter, data: BTreeMap::new() }
26 impl<T> OwnedStore<T> {
27 pub(super) fn alloc(&mut self, x: T) -> Handle {
28 let counter = self.counter.fetch_add(1, Ordering::SeqCst);
29 let handle = Handle::new(counter as u32).expect("`proc_macro` handle counter overflowed");
30 assert!(self.data.insert(handle, x).is_none());
34 pub(super) fn take(&mut self, h: Handle) -> T {
35 self.data.remove(&h).expect("use-after-free in `proc_macro` handle")
39 impl<T> Index<Handle> for OwnedStore<T> {
41 fn index(&self, h: Handle) -> &T {
42 self.data.get(&h).expect("use-after-free in `proc_macro` handle")
46 impl<T> IndexMut<Handle> for OwnedStore<T> {
47 fn index_mut(&mut self, h: Handle) -> &mut T {
48 self.data.get_mut(&h).expect("use-after-free in `proc_macro` handle")
52 pub(super) struct InternedStore<T: 'static> {
54 interner: HashMap<T, Handle>,
57 impl<T: Copy + Eq + Hash> InternedStore<T> {
58 pub(super) fn new(counter: &'static AtomicUsize) -> Self {
59 InternedStore { owned: OwnedStore::new(counter), interner: HashMap::new() }
62 pub(super) fn alloc(&mut self, x: T) -> Handle {
63 let owned = &mut self.owned;
64 *self.interner.entry(x).or_insert_with(|| owned.alloc(x))
67 pub(super) fn copy(&mut self, h: Handle) -> T {