use rand::rngs::StdRng;
use rustc_data_structures::fx::FxHashMap;
-use rustc_middle::{mir, ty};
+use rustc_middle::{mir, ty::{self, layout::TyAndLayout}};
use rustc_target::abi::{LayoutOf, Size};
use rustc_ast::attr;
use rustc_span::symbol::{sym, Symbol};
}
}
+/// Cached layouts of primitive types.
+///
+/// Uses interior mutability (`RefCell`) so the cache can be filled lazily
+/// from a shared reference the first time a layout is requested.
+#[derive(Default)]
+struct PrimitiveLayouts<'tcx> {
+    i32: RefCell<Option<TyAndLayout<'tcx>>>,
+    u32: RefCell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> {
+    /// Returns the layout of `i32`, computing and caching it on first use.
+    fn i32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        // `TyAndLayout` is `Copy`, so we can copy the cached value out of the
+        // `RefCell`; the shared borrow ends before the mutable borrow below.
+        if let Some(layout) = *self.i32.borrow() {
+            return Ok(layout);
+        }
+        let layout = ecx.layout_of(ecx.tcx.types.i32)?;
+        *self.i32.borrow_mut() = Some(layout);
+        Ok(layout)
+    }
+
+    /// Returns the layout of `u32`, computing and caching it on first use.
+    fn u32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        if let Some(layout) = *self.u32.borrow() {
+            return Ok(layout);
+        }
+        let layout = ecx.layout_of(ecx.tcx.types.u32)?;
+        *self.u32.borrow_mut() = Some(layout);
+        Ok(layout)
+    }
+}
+
/// The machine itself.
pub struct Evaluator<'tcx> {
/// Environment variables set by `setenv`.
/// The "time anchor" for this machine's monotone clock (for `Instant` simulation).
pub(crate) time_anchor: Instant,
+
+ /// Cached `TyLayout`s for primitive data types that are commonly used inside Miri.
+ primitive_layouts: PrimitiveLayouts<'tcx>,
}
impl<'tcx> Evaluator<'tcx> {
dir_handler: Default::default(),
panic_payload: None,
time_anchor: Instant::now(),
+ primitive_layouts: PrimitiveLayouts::default(),
}
}
}
}
}
+/// Provides convenience methods for use elsewhere
+pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
+    /// Layout of `i32`, served from the machine's primitive-layout cache.
+    fn i32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        let ecx = self.eval_context_ref();
+        ecx.machine.primitive_layouts.i32(ecx)
+    }
+
+    /// Layout of `u32`, served from the machine's primitive-layout cache.
+    fn u32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+        let ecx = self.eval_context_ref();
+        ecx.machine.primitive_layouts.u32(ecx)
+    }
+}
+
+impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
+
/// Machine hook implementations.
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
type MemoryKind = MiriMemoryKind;
// Ensure that the following read at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, attr_op, 4)?;
let attr_place = ecx.deref_operand(attr_op)?;
- let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
- let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+ let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
ecx.read_scalar(kind_place.into())
}
// Ensure that the following write at an offset to the attr pointer is within bounds
assert_ptr_target_min_size(ecx, attr_op, 4)?;
let attr_place = ecx.deref_operand(attr_op)?;
- let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
- let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+ let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?;
ecx.write_scalar(kind.into(), kind_place.into())
}
// Ensure that the following read at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let locked_count_place =
- mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+ mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.read_scalar(locked_count_place.into())
}
// Ensure that the following write at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let locked_count_place =
- mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+ mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.write_scalar(locked_count.into(), locked_count_place.into())
}
// Ensure that the following read at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
- let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
- let kind_place =
- mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
+ let kind_place = mutex_place.offset(
+ Size::from_bytes(kind_offset),
+ MemPlaceMeta::None,
+ ecx.i32_layout()?,
+ ecx,
+ )?;
ecx.read_scalar(kind_place.into())
}
// Ensure that the following write at an offset to the mutex pointer is within bounds
assert_ptr_target_min_size(ecx, mutex_op, 20)?;
let mutex_place = ecx.deref_operand(mutex_op)?;
- let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
- let kind_place =
- mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?;
+ let kind_place = mutex_place.offset(
+ Size::from_bytes(kind_offset),
+ MemPlaceMeta::None,
+ ecx.i32_layout()?,
+ ecx,
+ )?;
ecx.write_scalar(kind.into(), kind_place.into())
}
// Ensure that the following read at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let readers_place =
- rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+ rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.read_scalar(readers_place.into())
}
// Ensure that the following write at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let readers_place =
- rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+ rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.write_scalar(readers.into(), readers_place.into())
}
// Ensure that the following read at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let writers_place =
- rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
+ rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.read_scalar(writers_place.into())
}
// Ensure that the following write at an offset to the rwlock pointer is within bounds
assert_ptr_target_min_size(ecx, rwlock_op, 12)?;
let rwlock_place = ecx.deref_operand(rwlock_op)?;
- let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
let writers_place =
- rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?;
+ rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?;
ecx.write_scalar(writers.into(), writers_place.into())
}