//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
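//!
//! A typical driver pushes an initial stack frame and then calls `run`, which
//! simply invokes `step` in a loop until the call stack is empty.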

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
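/// For example, `a + b` has the same type as `a`, whereas `a < b` is `bool`
/// regardless of the operand types.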
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}

/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
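/// For example, both sides of `a == b` must have the same type, whereas the
/// RHS of `a << b` may be any integer type, and `Offset` pairs a pointer with
/// an integer offset.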
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
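    ///
    /// A debugger built on this interface might drive execution one step at a
    /// time; the `inspect` helper below is hypothetical, purely for illustration:
    ///
    /// ```ignore
    /// while interp.step()? {
    ///     inspect(&interp);
    /// }
    /// ```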
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Some(loc) => loc,
            None => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);
        self.set_span(stmt.source_info.span);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant_index(*variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
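                // `storage_live` returns the local's previous state; if the
                // local was already live, its old backing store must be freed.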
                let old_val = self.storage_live(*local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(*local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
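                // Pass a layout hint only when homogeneity guarantees the
                // operand has that exact type, sparing a layout computation.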
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, left, right, dest)?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, left, right, dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }

            Aggregate(ref kind, ref operands) => {
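                // For enums, downcast to the given variant before writing the
                // fields; `active_field_index` is `Some` only for unions, where
                // it names the single field being written.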
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant_index(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

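                // `check_mplace_access` returns `None` when the destination
                // occupies no memory (zero length or a ZST element type), in
                // which case there is nothing to copy.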
                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
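                // Taking a reference needs an actual address, so make sure the
                // place is backed by real memory.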
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(
                    !layout.is_unsized(),
                    "SizeOf nullary MIR operator called for unsized type"
                );
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
            }

            Cast(kind, ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                self.cast(src, kind, dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                let size = dest.layout.size;
                self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
            }
        }

        self.dump_place(*dest);

        Ok(())
    }

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);
        self.set_span(terminator.source_info.span);

        self.eval_terminator(terminator)?;
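        // The terminator may have pushed or popped stack frames; if anything
        // is left to execute, log the block where execution continues.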
        if !self.stack().is_empty() {
            if let Some(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}