diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 67affb622f8daffa1a6146c8856619f6874e93bc..990f4f8f1329f2ab2d53171088527d02491ae0d8 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
 
 use std::cell::Cell;
 use std::iter;
+#[cfg(debug_assertions)]
+use std::ops::BitXor;
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicU32, Ordering};
 
 pub struct MarkedAttrs(GrowableBitSet<AttrId>);
 
@@ -350,17 +354,36 @@ pub fn mk_nested_word_item(ident: Ident) -> NestedMetaItem {
 
 pub struct AttrIdGenerator(WorkerLocal<Cell<u32>>);
 
+#[cfg(debug_assertions)]
+static MAX_ATTR_ID: AtomicU32 = AtomicU32::new(u32::MAX);
+
 impl AttrIdGenerator {
     pub fn new() -> Self {
         // We use `index.reverse_bits()` to initialize the starting value
         // of AttrId in each worker thread, where `index` is the index of
         // that worker thread. This ensures that the AttrIds generated in
         // different threads never overlap.
-        AttrIdGenerator(WorkerLocal::new(|index| Cell::new((index as u32).reverse_bits())))
+        AttrIdGenerator(WorkerLocal::new(|index| {
+            let index: u32 = index.try_into().unwrap();
+
+            #[cfg(debug_assertions)]
+            {
+                let max_id = ((index + 1).next_power_of_two() - 1).bitxor(u32::MAX).reverse_bits();
+                MAX_ATTR_ID.fetch_min(max_id, Ordering::Release);
+            }
+
+            Cell::new(index.reverse_bits())
+        }))
     }
 
     pub fn mk_attr_id(&self) -> AttrId {
         let id = self.0.get();
+
+        // Ensure the assigned attr_id does not overlap the bits
+        // representing the number of threads.
+        #[cfg(debug_assertions)]
+        assert!(id <= MAX_ATTR_ID.load(Ordering::Acquire));
+
         self.0.set(id + 1);
         AttrId::from_u32(id)
     }
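
For reference, below is a small standalone sketch of the partitioning scheme used above. It is not rustc code: the in-tree `WorkerLocal`, `Cell` and `AttrId` types are replaced by plain values, the atomic `fetch_min` by a local minimum, and the worker-thread count of 4 is only an assumption for illustration. Each worker thread starts its counter at `index.reverse_bits()`, so the thread index sits in the topmost bits while the per-thread counter grows in the low bits; the `max_id` expression mirrors the bound that `MAX_ATTR_ID` accumulates via `fetch_min` for the debug assertion in `mk_attr_id`.

use std::ops::BitXor;

fn main() {
    // Hypothetical worker-thread count, chosen only for illustration.
    let threads: u32 = 4;
    // Plays the role of the MAX_ATTR_ID atomic; a plain local is enough here.
    let mut max_attr_id = u32::MAX;

    for index in 0..threads {
        // Same expressions as in `AttrIdGenerator::new` above.
        let start = index.reverse_bits();
        let max_id = ((index + 1).next_power_of_two() - 1).bitxor(u32::MAX).reverse_bits();
        // Stands in for `MAX_ATTR_ID.fetch_min(max_id, Ordering::Release)`.
        max_attr_id = max_attr_id.min(max_id);
        println!("thread {index}: counter starts at {start:#010x}, max_id {max_id:#010x}");
    }

    // With 4 threads the starting values are 0x00000000, 0x80000000,
    // 0x40000000 and 0xc0000000, spaced 0x40000000 apart, and the final
    // bound is 0x3fffffff: the largest offset a thread's counter may reach
    // before it would spill into a neighbouring thread's ID range.
    println!("MAX_ATTR_ID = {max_attr_id:#010x}");
}

Whether that per-thread budget is ever exhausted in practice is a separate question; the point of the `reverse_bits` trick is that disjoint starting values can be derived from the thread index alone, with no cross-thread coordination beyond the debug-only bound.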