//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}

/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
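
// A quick illustration of the classification above (hypothetical asserts, not
// part of this module):
//
//     assert!(binop_left_homogeneous(mir::BinOp::Add));   // `usize + usize` yields `usize`
//     assert!(!binop_left_homogeneous(mir::BinOp::Eq));   // comparisons yield `bool`
//     assert!(binop_right_homogeneous(mir::BinOp::Eq));   // both operands share one type
//     assert!(!binop_right_homogeneous(mir::BinOp::Shl)); // e.g. `u64 << i32` is legal MIR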

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
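    ///
    /// A hypothetical debugger-style driver (illustrative only; `interp` and
    /// `inspect` are assumed, not defined in this crate):
    ///
    /// ```ignore
    /// while interp.step()? {
    ///     inspect(&interp); // examine the machine state between steps
    /// }
    /// ```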
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Ok(loc) => loc,
            Err(_) => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter. This also moves the statement counter forward.
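    ///
    /// For illustration, a basic block with the (hypothetical) MIR statements
    ///
    /// ```text
    /// StorageLive(_1);
    /// _1 = const 5_i32;
    /// StorageDead(_1);
    /// ```
    ///
    /// is executed by three successive `statement` calls, each bumping
    /// `statement_index` by one until only the block's terminator remains.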
    crate fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, &dest)?;
            }

            // Call CopyNonOverlapping
            CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let id = M::thread_local_static_alloc_id(self, did)?;
                let val = self.global_base_pointer(id.into())?;
                self.write_scalar(val, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
            }

            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
            }
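
            // For example (illustrative MIR): `CheckedAdd(_1, _2)` on `u8`
            // operands writes a `(u8, bool)` pair, so `dest.layout` indeed
            // never matches the operand layout.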

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, &val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant(variant_index, &dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(&dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(&dest, field_index)?;
                        self.copy_op(&op, &field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(&dest)?;
                let length = dest.len(self)?;

                if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
                    // Write the first element.
                    let first = self.mplace_field(&dest, 0)?;
                    self.copy_op(&op, &first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(&src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
            }
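
            // For example (illustrative MIR): for a slice `_1: &[u8]`, bounds
            // checks emit `_2 = Len((*_1))`, which reads the length from the
            // wide pointer's metadata and writes it as a machine `usize`.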

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), &dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, &dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                if layout.is_unsized() {
                    // FIXME: This should be a span_bug (#80742)
                    self.tcx.sess.delay_span_bug(
                        self.frame().current_span(),
                        &format!("SizeOf nullary MIR operator called for unsized type {}", ty),
                    );
                    throw_inval!(SizeOfUnsizedType(ty));
                }
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(&op)?.0;
                self.write_scalar(discr_val, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(*dest));

        Ok(())
    }
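
    // A worked example (illustrative MIR): for the statement
    //
    //     _2 = Add(_1, const 1_i32)
    //
    // the `Assign` arm of `statement` calls `eval_rvalue_into_place`; the
    // `BinaryOp` arm then reads both operands as immediates (seeding the LHS
    // layout from `_2`, since `Add` is left-homogeneous) and writes the sum
    // directly into `_2`'s place.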

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Ok(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}