global.sc_read();
}
let mut rng = this.machine.rng.borrow_mut();
- let loaded = alloc_buffers.buffered_read(
- alloc_range(base_offset, place.layout.size),
+ let buffer = alloc_buffers.get_store_buffer(alloc_range(base_offset, place.layout.size));
+ let loaded = buffer.buffered_read(
global,
atomic == AtomicReadOp::SeqCst,
&mut *rng,
if atomic == AtomicWriteOp::SeqCst {
global.sc_write();
}
- let size = dest.layout.size;
- alloc_buffers.buffered_write(
+ let mut buffer = alloc_buffers.get_store_buffer_mut(alloc_range(base_offset, dest.layout.size));
+ buffer.buffered_write(
val,
- alloc_range(base_offset, size),
global,
atomic == AtomicWriteOp::SeqCst,
)?;
let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
if global.multi_threaded.get() {
- alloc_buffers.read_from_last_store(alloc_range(base_offset, size), global);
+ let buffer = alloc_buffers.get_store_buffer(alloc_range(base_offset, size));
+ buffer.read_from_last_store(global);
}
}
}
global.sc_read();
global.sc_write();
}
- let size = place.layout.size;
- let range = alloc_range(base_offset, size);
- alloc_buffers.read_from_last_store(range, global);
- alloc_buffers.buffered_write(new_val, range, global, atomic == AtomicRwOp::SeqCst)?;
+ let range = alloc_range(base_offset, place.layout.size);
+ let mut buffer = alloc_buffers.get_store_buffer_mut(range);
+ buffer.read_from_last_store(global);
+ buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
}
Ok(())
}
}
/// Gets a store buffer associated with an atomic object in this allocation
- fn get_store_buffer(&self, range: AllocRange) -> Ref<'_, StoreBuffer> {
+ pub fn get_store_buffer(&self, range: AllocRange) -> Ref<'_, StoreBuffer> {
Ref::map(self.store_buffer.borrow(), |range_map| {
let (.., store_buffer) = range_map.iter(range.start, range.size).next().unwrap();
store_buffer
})
}
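+ /// Gets a mutable store buffer associated with an atomic object in this allocation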
- fn get_store_buffer_mut(&self, range: AllocRange) -> RefMut<'_, StoreBuffer> {
+ pub fn get_store_buffer_mut(&self, range: AllocRange) -> RefMut<'_, StoreBuffer> {
RefMut::map(self.store_buffer.borrow_mut(), |range_map| {
let (.., store_buffer) = range_map.iter_mut(range.start, range.size).next().unwrap();
store_buffer
})
}
+}
+
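+ // Limit on the number of stores retained in a single store buffer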
+const STORE_BUFFER_LIMIT: usize = 128;
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct StoreBuffer {
+ // Stores to this location in modification order
+ buffer: VecDeque<StoreElement>,
+}
+
+impl Default for StoreBuffer {
+ fn default() -> Self {
+ let mut buffer = VecDeque::new();
+ buffer.reserve(STORE_BUFFER_LIMIT);
+ Self { buffer }
+ }
+}
+
+impl<'mir, 'tcx: 'mir> StoreBuffer {
/// Reads from the last store in modification order
- pub fn read_from_last_store<'tcx>(&self, range: AllocRange, global: &GlobalState) {
- let store_buffer = self.get_store_buffer(range);
- let store_elem = store_buffer.buffer.back();
+ pub fn read_from_last_store(&self, global: &GlobalState) {
+ let store_elem = self.buffer.back();
if let Some(store_elem) = store_elem {
let (index, clocks) = global.current_thread_state();
store_elem.load_impl(index, &clocks);
}
}
- pub fn buffered_read<'tcx>(
+ pub fn buffered_read(
&self,
- range: AllocRange,
global: &GlobalState,
is_seqcst: bool,
rng: &mut (impl rand::Rng + ?Sized),
) -> InterpResult<'tcx, Option<ScalarMaybeUninit<Tag>>> {
// Having a live borrow to store_buffer while calling validate_atomic_load is fine
// because the race detector doesn't touch store_buffer
- let store_buffer = self.get_store_buffer(range);
let store_elem = {
// The `clocks` we got here must be dropped before calling validate_atomic_load
// as the race detector will update it
let (.., clocks) = global.current_thread_state();
// Load from a valid entry in the store buffer
- store_buffer.fetch_store(is_seqcst, &clocks, &mut *rng)
+ self.fetch_store(is_seqcst, &clocks, &mut *rng)
};
// Unlike in write_scalar_atomic, thread clock updates have to be done
// after we've picked a store element from the store buffer
Ok(loaded)
}
- pub fn buffered_write<'tcx>(
+ pub fn buffered_write(
&mut self,
val: ScalarMaybeUninit<Tag>,
- range: AllocRange,
global: &GlobalState,
is_seqcst: bool,
) -> InterpResult<'tcx> {
let (index, clocks) = global.current_thread_state();
- let mut store_buffer = self.get_store_buffer_mut(range);
- store_buffer.store_impl(val, index, &clocks.clock, is_seqcst);
+ self.store_impl(val, index, &clocks.clock, is_seqcst);
Ok(())
}
-}
-const STORE_BUFFER_LIMIT: usize = 128;
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct StoreBuffer {
- // Stores to this location in modification order
- buffer: VecDeque<StoreElement>,
-}
-
-impl Default for StoreBuffer {
- fn default() -> Self {
- let mut buffer = VecDeque::new();
- buffer.reserve(STORE_BUFFER_LIMIT);
- Self { buffer }
- }
-}
-
-impl<'mir, 'tcx: 'mir> StoreBuffer {
/// Selects a valid store element in the buffer.
/// The buffer does not contain the value used to initialise the atomic object,
/// so a fresh atomic object has an empty store buffer until an explicit store.