//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}
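// For example, `Add` on two `u32`s produces a `u32` (left-homogeneous), while a
// comparison such as `Lt` produces a `bool`, which differs from the LHS type.
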
/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
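// For example, `Offset` pairs a pointer LHS with a `usize` RHS, and `Shl`/`Shr`
// permit a shift amount whose type differs from the shifted value (e.g. `1u64 << 2i32`),
// so none of those are right-homogeneous.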

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
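    /// Run the interpreter loop until there is nothing left to do, i.e. until
    /// `step` reports that the call stack is empty.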
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Some(loc) => loc,
            None => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

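        // If we are not yet at the end of the block, execute the next statement;
        // otherwise, this block's terminator is up next.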
        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);
        self.set_span(stmt.source_info.span);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant_index(*variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                let old_val = self.storage_live(*local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(*local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
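    ///
    /// For example, for the MIR assignment `_3 = Len(_1)`, this is called with
    /// `place = _3` and `rvalue = Len(_1)`, and the length is written directly into `_3`.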
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

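            // For binary operations, the homogeneity helpers above tell us when an
            // operand must share a layout we already know, letting us pass that
            // layout along instead of recomputing it.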
            BinaryOp(bin_op, ref left, ref right) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, left, right, dest)?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, left, right, dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }

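            // Build an aggregate (tuple, array, struct, enum, or closure) by
            // evaluating each operand into the corresponding field of `dest`.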
            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant_index(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

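            // `Repeat` implements array repeat expressions such as `[0u8; 4096]`:
            // the element is evaluated once and then copied into the remaining slots.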
            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }

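            // `Len` is introduced, for example, by slice and array bounds checks.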
            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }

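            // `box EXPR` needs fresh memory; how that allocation happens is up to
            // the `Machine` implementation.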
            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(
                    !layout.is_unsized(),
                    "SizeOf nullary MIR operator called for unsized type"
                );
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
            }

            Cast(kind, ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                self.cast(src, kind, dest)?;
            }

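            // Read an enum's discriminant, e.g. as produced when lowering a `match`.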
            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                let size = dest.layout.size;
                self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
            }
        }

        self.dump_place(*dest);

        Ok(())
    }

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);
        self.set_span(terminator.source_info.span);

        self.eval_terminator(terminator)?;
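        // `eval_terminator` may have popped frames (e.g. on `Return`), so only
        // report the new block if there still is a frame.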
        if !self.stack().is_empty() {
            if let Some(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}