//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use either::Either;

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}

/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
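
// An illustrative reading of the two classifiers above, with surface Rust standing
// in for MIR operands (this comment is ours, not compiled):
//
//     let a: usize = 3;
//     let r: usize = a + 2;     // `Add` is left- and right-homogeneous:
//                               // LHS, RHS, and result are all `usize`.
//     let c: bool = a < 2;      // `Lt` is right- but not left-homogeneous:
//                               // the operands match, but the result is `bool`.
//     let s: usize = a << 2u8;  // `Shl` is left- but not right-homogeneous:
//                               // the shift amount may have any integer type.
//
// `eval_rvalue_into_place` below uses these to decide when an operand's layout can
// be reused from the destination (or from the LHS) instead of being recomputed.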

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        // Step the interpreter until there is nothing left to do.
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let Either::Left(loc) = self.frame().loc else {
            // We are unwinding and this fn has no cleanup code.
            // Just go on unwinding.
            trace!("unwinding: skipping frame");
            self.pop_stack_frame(/* unwinding */ true)?;
            return Ok(true);
        };
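        // From here on, `loc` identifies the basic block and statement index to
        // execute in the current frame; the unwinding-without-cleanup state
        // (`Either::Right`) was handled by the `else` branch above.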
        let basic_block = &self.body().basic_blocks[loc.block];

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            let old_frames = self.frame_idx();
            self.statement(stmt)?;
            // Make sure we are not updating `statement_index` of the wrong frame.
            assert_eq!(old_frames, self.frame_idx());
            // Advance the program counter.
            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter.
    ///
    /// This does NOT move the statement counter forward; the caller has to do that!
    pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        match &stmt.kind {
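            // For orientation, the arms below mirror `mir::StatementKind`; in MIR
            // dumps, such statements look roughly like (illustrative, not exact):
            //
            //     _1 = Add(_2, _3);        // Assign
            //     discriminant(_1) = 1;    // SetDiscriminant
            //     StorageLive(_1);         // StorageLive
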
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            Deinit(place) => {
                let dest = self.eval_place(**place)?;
                self.write_uninit(&dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, &dest)?;
            }

            Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile-time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const-eval counter, is the subject of
            // the following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}
        }

        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;
        // FIXME: ensure some kind of non-aliasing between LHS and RHS?
        // Also see https://github.com/rust-lang/rust/issues/68364.

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let ptr = M::thread_local_static_base_pointer(self, did)?;
                self.write_pointer(ptr, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout.
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
            }

            CopyForDeref(ref place) => {
                let op = self.eval_place_to_op(*place, Some(dest.layout))?;
                self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
            }

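            // For orientation: overflow-checked arithmetic such as `a + b` with debug
            // assertions enabled lowers to MIR along the lines of (illustrative dump
            // syntax)
            //
            //     _4 = CheckedAdd(_2, _3);
            //     assert(!(_4.1: bool), "attempt to add with overflow", ...);
            //
            // so the destination is a `(T, bool)` pair rather than a plain `T`.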
            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(
                    bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
                )?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, &val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, &dest)?;
            }

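            // For orientation: by the time this interpreter runs, non-array aggregates
            // have been lowered to field-by-field assignments, so only array literals
            // such as `_1 = [move _2, move _3]` (illustrative dump syntax) reach this
            // arm — hence the assert below.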
            Aggregate(box ref kind, ref operands) => {
                assert!(matches!(kind, mir::AggregateKind::Array(..)));

                for (field_index, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    let field_dest = self.place_field(&dest, field_index)?;
                    self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
                }
            }

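            // For orientation: a repeat expression like `[0u8; 4096]` becomes
            // `_1 = [const 0_u8; 4096]` in MIR. The arm below evaluates the element
            // once, writes it into slot 0, and then bulk-copies it into the remaining
            // `length - 1` slots instead of evaluating it once per element.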
            Repeat(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                assert!(src.layout.is_sized());
                let dest = self.force_allocation(&dest)?;
                let length = dest.len(self)?;

                if length == 0 {
                    // Nothing to copy... but let's still make sure that `dest` as a place is valid.
                    self.get_place_alloc_mut(&dest)?;
                } else {
                    // Write the src to the first element.
                    let first = self.mplace_field(&dest, 0)?;
                    self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;

                    // This is performance-sensitive code for big static/const arrays! So we
                    // avoid writing each operand individually and instead just make many copies
                    // of the first element.
                    let elem_size = first.layout.size;
                    let first_ptr = first.ptr;
                    let rest_ptr = first_ptr.offset(elem_size, self)?;
                    // For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
                    // that place might be more aligned than its type mandates (a `u8` array could
                    // be 4-aligned if it sits at the right spot in a struct). Instead we use
                    // `first.layout.align`, i.e., the alignment given by the type.
                    self.mem_copy_repeatedly(
                        first_ptr,
                        first.align,
                        rest_ptr,
                        first.layout.align.abi,
                        elem_size,
                        length - 1,
                        /*nonoverlapping:*/ true,
                    )?;
                }
            }

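            // For orientation: `Len` is emitted by bounds checks on arrays and slices,
            // e.g. `_3 = Len((*_1))` (illustrative dump syntax) before an index assert.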
            Len(place) => {
                let src = self.eval_place(place)?;
                let op = self.place_to_op(&src)?;
                let len = op.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
            }

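            // For orientation: taking a reference or raw pointer (`_2 = &_1`,
            // `_2 = &raw mut _1`) needs the referent to have an address, so the arm
            // below forces the place into actual memory before writing the pointer.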
            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                self.write_immediate(place.to_ref(self), &dest)?;
            }

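            // For orientation: `NullaryOp` covers operations that take no operands
            // because the answer is determined by the (monomorphized) type alone,
            // e.g. `size_of::<u64>()` evaluates to 8 and `align_of::<u64>()` to the
            // target's alignment for `u64`.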
            NullaryOp(null_op, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
                let layout = self.layout_of(ty)?;
                if layout.is_unsized() {
                    // FIXME: This should be a span_bug (#80742)
                    self.tcx.sess.delay_span_bug(
                        self.frame().current_span(),
                        &format!("Nullary MIR operator called for unsized type {}", ty),
                    );
                    throw_inval!(SizeOfUnsizedType(ty));
                }
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
            }

            ShallowInitBox(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                let v = self.read_immediate(&src)?;
                self.write_immediate(*v, &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty =
                    self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

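            // For orientation: match lowering reads the variant tag via statements
            // like `_2 = discriminant(_1)` (illustrative dump syntax) and then
            // switches on the resulting value.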
            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(&op)?.0;
                self.write_scalar(discr_val, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(*dest));

        Ok(())
    }

    /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Either::Left(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}