git.lizzy.rs Git - rust.git/commitdiff
Clearer boundaries between alloc metadata with multiple buffers and an individual...
author     Andy Wang <cbeuw.andy@gmail.com>
           Sun, 1 May 2022 11:36:00 +0000 (12:36 +0100)
committer  Andy Wang <cbeuw.andy@gmail.com>
           Mon, 6 Jun 2022 18:15:21 +0000 (19:15 +0100)
src/data_race.rs
src/weak_memory.rs
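
This commit splits the weak-memory API in two: the per-allocation container behind `alloc_buffers` (its definition is outside this diff) now only resolves an `AllocRange` to the `StoreBuffer` covering it, via the newly public `get_store_buffer` / `get_store_buffer_mut`, while the read/write logic itself moves onto `StoreBuffer`. Callers in src/data_race.rs therefore fetch the buffer first, then operate on it. A sketch of the resulting call shape, using only names visible in the hunks below (the trailing arguments of `buffered_read` are truncated in this diff and elided here as well):

    let range = alloc_range(base_offset, place.layout.size);
    let buffer = alloc_buffers.get_store_buffer(range);
    let loaded = buffer.buffered_read(
        global,
        atomic == AtomicReadOp::SeqCst,
        &mut *rng,
        // ...remaining arguments as shown in the hunk below...
    )?;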

diff --git a/src/data_race.rs b/src/data_race.rs
index 82ee32ddee71f275c8f23a5f633ec6f5fd8eccc4..303cf7007e75344e7fca1bcd21e3f067b5cd09fa 100644 (file)
@@ -519,8 +519,8 @@ fn read_scalar_atomic(
                     global.sc_read();
                 }
                 let mut rng = this.machine.rng.borrow_mut();
-                let loaded = alloc_buffers.buffered_read(
-                    alloc_range(base_offset, place.layout.size),
+                let buffer = alloc_buffers.get_store_buffer(alloc_range(base_offset, place.layout.size));
+                let loaded = buffer.buffered_read(
                     global,
                     atomic == AtomicReadOp::SeqCst,
                     &mut *rng,
@@ -555,10 +555,9 @@ fn write_scalar_atomic(
             if atomic == AtomicWriteOp::SeqCst {
                 global.sc_write();
             }
-            let size = dest.layout.size;
-            alloc_buffers.buffered_write(
+            let mut buffer = alloc_buffers.get_store_buffer_mut(alloc_range(base_offset, dest.layout.size));
+            buffer.buffered_write(
                 val,
-                alloc_range(base_offset, size),
                 global,
                 atomic == AtomicWriteOp::SeqCst,
             )?;
@@ -708,7 +707,8 @@ fn atomic_compare_exchange_scalar(
                 let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
                 if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
                     if global.multi_threaded.get() {
-                        alloc_buffers.read_from_last_store(alloc_range(base_offset, size), global);
+                        let buffer = alloc_buffers.get_store_buffer(alloc_range(base_offset, size));
+                        buffer.read_from_last_store(global);
                     }
                 }
             }
@@ -735,10 +735,10 @@ fn buffered_atomic_rmw(
                 global.sc_read();
                 global.sc_write();
             }
-            let size = place.layout.size;
-            let range = alloc_range(base_offset, size);
-            alloc_buffers.read_from_last_store(range, global);
-            alloc_buffers.buffered_write(new_val, range, global, atomic == AtomicRwOp::SeqCst)?;
+            let range = alloc_range(base_offset, place.layout.size);
+            let mut buffer = alloc_buffers.get_store_buffer_mut(range);
+            buffer.read_from_last_store(global);
+            buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
         }
         Ok(())
     }
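
A consequence of the split worth noting: both getters borrow the same RefCell-backed map (see the `Ref::map` / `RefMut::map` projections in src/weak_memory.rs below), so the usual dynamic borrow rules apply, and a shared `Ref` must be dropped before `get_store_buffer_mut` can succeed. A minimal, self-contained illustration of that discipline, with a plain `RefCell<Vec<u8>>` standing in for the real store-buffer map:

    use std::cell::RefCell;

    fn main() {
        let cell = RefCell::new(vec![1u8, 2, 3]);
        let shared = cell.borrow(); // analogous to get_store_buffer
        assert_eq!(shared[0], 1);
        // Calling cell.borrow_mut() here would panic: the shared borrow is still live.
        drop(shared);
        let mut excl = cell.borrow_mut(); // analogous to get_store_buffer_mut
        excl.push(4);
        assert_eq!(excl.len(), 4);
    }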
diff --git a/src/weak_memory.rs b/src/weak_memory.rs
index c82a31d0a89c1ac5606300d12598d7694120ad7e..2cf9a98b13351d3b83767fae18f73d0951e1956a 100644 (file)
@@ -53,33 +53,49 @@ pub fn new_allocation(len: Size) -> Self {
     }
 
     /// Gets a store buffer associated with an atomic object in this allocation
-    fn get_store_buffer(&self, range: AllocRange) -> Ref<'_, StoreBuffer> {
+    pub fn get_store_buffer(&self, range: AllocRange) -> Ref<'_, StoreBuffer> {
         Ref::map(self.store_buffer.borrow(), |range_map| {
             let (.., store_buffer) = range_map.iter(range.start, range.size).next().unwrap();
             store_buffer
         })
     }
 
-    fn get_store_buffer_mut(&self, range: AllocRange) -> RefMut<'_, StoreBuffer> {
+    pub fn get_store_buffer_mut(&self, range: AllocRange) -> RefMut<'_, StoreBuffer> {
         RefMut::map(self.store_buffer.borrow_mut(), |range_map| {
             let (.., store_buffer) = range_map.iter_mut(range.start, range.size).next().unwrap();
             store_buffer
         })
     }
 
+}
+
+const STORE_BUFFER_LIMIT: usize = 128;
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct StoreBuffer {
+    // Stores to this location in modification order
+    buffer: VecDeque<StoreElement>,
+}
+
+impl Default for StoreBuffer {
+    fn default() -> Self {
+        let mut buffer = VecDeque::new();
+        buffer.reserve(STORE_BUFFER_LIMIT);
+        Self { buffer }
+    }
+}
+
+impl<'mir, 'tcx: 'mir> StoreBuffer {
     /// Reads from the last store in modification order
-    pub fn read_from_last_store<'tcx>(&self, range: AllocRange, global: &GlobalState) {
-        let store_buffer = self.get_store_buffer(range);
-        let store_elem = store_buffer.buffer.back();
+    pub fn read_from_last_store(&self, global: &GlobalState) {
+        let store_elem = self.buffer.back();
         if let Some(store_elem) = store_elem {
             let (index, clocks) = global.current_thread_state();
             store_elem.load_impl(index, &clocks);
         }
     }
 
-    pub fn buffered_read<'tcx>(
+    pub fn buffered_read(
         &self,
-        range: AllocRange,
         global: &GlobalState,
         is_seqcst: bool,
         rng: &mut (impl rand::Rng + ?Sized),
@@ -87,14 +103,13 @@ pub fn buffered_read<'tcx>(
     ) -> InterpResult<'tcx, Option<ScalarMaybeUninit<Tag>>> {
         // Having a live borrow to store_buffer while calling validate_atomic_load is fine
         // because the race detector doesn't touch store_buffer
-        let store_buffer = self.get_store_buffer(range);
 
         let store_elem = {
             // The `clocks` we got here must be dropped before calling validate_atomic_load
             // as the race detector will update it
             let (.., clocks) = global.current_thread_state();
             // Load from a valid entry in the store buffer
-            store_buffer.fetch_store(is_seqcst, &clocks, &mut *rng)
+            self.fetch_store(is_seqcst, &clocks, &mut *rng)
         };
 
         // Unlike in write_scalar_atomic, thread clock updates have to be done
@@ -110,37 +125,18 @@ pub fn buffered_read<'tcx>(
         Ok(loaded)
     }
 
-    pub fn buffered_write<'tcx>(
+    pub fn buffered_write(
         &mut self,
         val: ScalarMaybeUninit<Tag>,
-        range: AllocRange,
         global: &GlobalState,
         is_seqcst: bool,
     ) -> InterpResult<'tcx> {
         let (index, clocks) = global.current_thread_state();
 
-        let mut store_buffer = self.get_store_buffer_mut(range);
-        store_buffer.store_impl(val, index, &clocks.clock, is_seqcst);
+        self.store_impl(val, index, &clocks.clock, is_seqcst);
         Ok(())
     }
-}
 
-const STORE_BUFFER_LIMIT: usize = 128;
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct StoreBuffer {
-    // Stores to this location in modification order
-    buffer: VecDeque<StoreElement>,
-}
-
-impl Default for StoreBuffer {
-    fn default() -> Self {
-        let mut buffer = VecDeque::new();
-        buffer.reserve(STORE_BUFFER_LIMIT);
-        Self { buffer }
-    }
-}
-
-impl<'mir, 'tcx: 'mir> StoreBuffer {
     /// Selects a valid store element in the buffer.
     /// The buffer does not contain the value used to initialise the atomic object
     /// so a fresh atomic object has an empty store buffer until an explicit store.
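
For reference, the `Ref::map` / `RefMut::map` projection used by `get_store_buffer` and `get_store_buffer_mut` above can be reproduced in a self-contained sketch; `Alloc` and `Vec<String>` are hypothetical stand-ins for the real allocation metadata and the `range_map` seen in those methods:

    use std::cell::{Ref, RefCell, RefMut};

    struct Alloc {
        // Stand-in for the RefCell-wrapped range map of store buffers.
        buffers: RefCell<Vec<String>>,
    }

    impl Alloc {
        // Projects the whole-map borrow down to a single element;
        // the same shape as get_store_buffer above.
        fn get(&self, i: usize) -> Ref<'_, String> {
            Ref::map(self.buffers.borrow(), |v| &v[i])
        }

        // Mutable counterpart, matching get_store_buffer_mut.
        fn get_mut(&self, i: usize) -> RefMut<'_, String> {
            RefMut::map(self.buffers.borrow_mut(), |v| &mut v[i])
        }
    }

    fn main() {
        let alloc = Alloc { buffers: RefCell::new(vec!["a".into(), "b".into()]) };
        alloc.get_mut(1).push('!');
        assert_eq!(&*alloc.get(1), "b!");
    }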