use std::borrow::Cow;
use std::convert::TryFrom;

use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::Instance;
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, LayoutOf as _};
use rustc_target::spec::abi::Abi;

use super::{
    FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, StackPopCleanup,
};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub(super) fn eval_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.pop_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets, switch_ty } => {
                let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);
                assert_eq!(discr.layout.ty, switch_ty);

                // Branch to the `otherwise` case by default, if no match is found.
                assert!(!targets.iter().is_empty());
                let mut target_block = targets.otherwise();

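                // For illustration (hypothetical MIR, not produced by this module): a
                // `match` on an `Option<i32>` discriminant lowers to something like
                //     _2 = discriminant(_1);
                //     switchInt(move _2) -> [0_isize: bb2, 1_isize: bb3, otherwise: bb4];
                // and the loop below walks the `(value, target)` pairs, falling back to
                // `otherwise` when nothing matches.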
                for (const_int, target) in targets.iter() {
                    // Compare using `binary_op`, to also support pointer values.
                    let res = self
                        .overflowing_binary_op(
                            mir::BinOp::Eq,
                            discr,
                            ImmTy::from_uint(const_int, discr.layout),
                        )?
                        .0;
                    if res.to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call { ref func, ref args, destination, ref cleanup, from_hir_call: _, fn_span: _ } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;
                let func = self.eval_operand(func, None)?;
                let (fn_val, abi) = match *func.layout.ty.kind() {
                    ty::FnPtr(sig) => {
                        let caller_abi = sig.abi();
                        let fn_ptr = self.read_scalar(func)?.check_init()?;
                        let fn_val = self.memory.get_fn(fn_ptr)?;
                        (fn_val, caller_abi)
                    }
                    ty::FnDef(def_id, substs) => {
                        let sig = func.layout.ty.fn_sig(*self.tcx);
                        (
                            FnVal::Instance(
                                self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?,
                            ),
                            sig.abi(),
                        )
                    }
                    _ => span_bug!(
                        terminator.source_info.span,
                        "invalid callee of type {:?}",
                        func.layout.ty
                    ),
                };
                let args = self.eval_operands(args)?;
                let ret = match destination {
                    Some((dest, ret)) => Some((self.eval_place(dest)?, ret)),
                    None => None,
                };
                self.eval_fn_call(fn_val, abi, &args[..], ret, *cleanup)?;
                // Sanity-check that `eval_fn_call` either pushed a new frame or
                // did a jump to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            Drop { place, target, unwind } => {
                let place = self.eval_place(place)?;
                let ty = place.layout.ty;
                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);

                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                self.drop_in_place(place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, cleanup } => {
                let cond_val =
                    self.read_immediate(self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
                if expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, cleanup)?;
                }
            }

            Abort => {
                M::abort(self)?;
            }

            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame.
            Resume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding.
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            DropAndReplace { .. }
            | FalseEdge { .. }
            | FalseUnwind { .. }
            | Yield { .. }
            | GeneratorDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            // Inline assembly can't be interpreted.
            InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        Ok(())
    }

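    // For illustration (hypothetical caller/callee pair): `&u8` and `Option<&u8>`
    // are both `Scalar { value: Pointer, .. }` layouts that differ only in their
    // valid range, so the check below accepts them for a Rust-ABI call; `u64`
    // (a `Scalar`) against `(u32, u32)` (a `ScalarPair`) is rejected.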
    fn check_argument_compat(
        rust_abi: bool,
        caller: TyAndLayout<'tcx>,
        callee: TyAndLayout<'tcx>,
    ) -> bool {
        if caller.ty == callee.ty {
            // No question.
            return true;
        }
        if !rust_abi {
            // Don't risk anything.
            return false;
        }
        // Compare the layouts.
        match (&caller.abi, &callee.abi) {
            // Different valid ranges are okay (once we enforce validity,
            // that will take care to make it UB to leave the range, just
            // like for transmute).
            (abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
                caller.value == callee.value
            }
            (
                abi::Abi::ScalarPair(ref caller1, ref caller2),
                abi::Abi::ScalarPair(ref callee1, ref callee2),
            ) => caller1.value == callee1.value && caller2.value == callee2.value,
            // Be conservative.
            _ => false,
        }
    }

    /// Pass a single argument, checking the types for compatibility.
    fn pass_argument(
        &mut self,
        rust_abi: bool,
        caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
        callee_arg: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if rust_abi && callee_arg.layout.is_zst() {
            // Nothing to do.
            trace!("Skipping callee ZST");
            return Ok(());
        }
        let caller_arg = caller_arg.next().ok_or_else(|| {
            err_ub_format!("calling a function with fewer arguments than it requires")
        })?;
        if rust_abi {
            assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
        }
        // Now, check compatibility.
        if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
            throw_ub_format!(
                "calling a function with argument of type {:?} passing data of type {:?}",
                callee_arg.layout.ty,
                caller_arg.layout.ty
            )
        }
        // We allow some transmutes here.
        self.copy_op_transmute(caller_arg, callee_arg)
    }

    /// Call this function -- pushing the stack frame and initializing the arguments.
    fn eval_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        caller_abi: Abi,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("eval_fn_call: {:#?}", fn_val);

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(self, extra, args, ret, unwind);
            }
        };

        // ABI check
        {
            let callee_abi = {
                let instance_ty = instance.ty(*self.tcx, self.param_env);
                match instance_ty.kind() {
                    ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(),
                    ty::Closure(..) => Abi::RustCall,
                    ty::Generator(..) => Abi::Rust,
                    _ => span_bug!(self.cur_span(), "unexpected callee ty: {:?}", instance_ty),
                }
            };
            let normalize_abi = |abi| match abi {
                // These are all the same ABI, really.
                Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic => Abi::Rust,
                abi => abi,
            };
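            // For illustration: `Abi::Rust` and `Abi::RustCall` both normalize to
            // `Abi::Rust`, so e.g. a closure (callee ABI `RustCall`) is not rejected
            // merely because the caller-side ABI is a different Rust-internal flavor;
            // a genuine mismatch such as `Abi::C` vs. `Abi::Rust` still is.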
            if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
                throw_ub_format!(
                    "calling a function with ABI {:?} using caller ABI {:?}",
                    callee_abi,
                    caller_abi
                )
            }
        }

        match instance.def {
            ty::InstanceDef::Intrinsic(..) => {
                assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic);
                M::call_intrinsic(self, instance, args, ret, unwind)
            }
            ty::InstanceDef::VtableShim(..)
            | ty::InstanceDef::ReifyShim(..)
            | ty::InstanceDef::ClosureOnceShim { .. }
            | ty::InstanceDef::FnPtrShim(..)
            | ty::InstanceDef::DropGlue(..)
            | ty::InstanceDef::CloneShim(..)
            | ty::InstanceDef::Item(_) => {
                // We need MIR for this fn.
                let body = match M::find_mir_or_eval_fn(self, instance, args, ret, unwind)? {
                    Some(body) => body,
                    None => return Ok(()),
                };
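                // (A machine may handle the call entirely in `find_mir_or_eval_fn`;
                // Miri, for instance, emulates foreign functions such as `malloc`
                // there. In that case there is no MIR body to push and we are done.)
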
                self.push_stack_frame(
                    instance,
                    body,
                    ret.map(|p| p.0),
                    StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind },
                )?;

                // If an error is raised here, pop the frame again to get an accurate backtrace.
                // To this end, we wrap it all in a `try` block.
                let res: InterpResult<'tcx> = try {
                    trace!(
                        "caller ABI: {:?}, args: {:#?}",
                        caller_abi,
                        args.iter()
                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
                            .collect::<Vec<_>>()
                    );
                    trace!(
                        "spread_arg: {:?}, locals: {:#?}",
                        body.spread_arg,
                        body.args_iter()
                            .map(|local| (
                                local,
                                self.layout_of_local(self.frame(), local, None).unwrap().ty
                            ))
                            .collect::<Vec<_>>()
                    );

                    // Figure out how to pass which arguments.
                    // The Rust ABI is special: ZSTs get skipped.
                    let rust_abi = match caller_abi {
                        Abi::Rust | Abi::RustCall => true,
                        _ => false,
                    };
                    // We have two iterators: where the arguments come from,
                    // and where they go to.

                    // For where they come from: if the ABI is RustCall, we untuple the
                    // last incoming argument. These two iterators do not have the same type,
                    // so to keep the code paths uniform we accept an allocation
                    // (for the RustCall ABI only).
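                    // For illustration (hypothetical call, not part of this module):
                    // invoking a closure `f` via `FnMut::call_mut(&mut f, (1i32, 2i32))`
                    // arrives here with `args == [&mut f, (1i32, 2i32)]`; untupling
                    // expands that into the caller arguments `&mut f`, `1i32`, `2i32`.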
                    let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
                        if caller_abi == Abi::RustCall && !args.is_empty() {
                            // Untuple.
                            let (&untuple_arg, args) = args.split_last().unwrap();
                            trace!("eval_fn_call: Will pass last argument by untupling");
                            Cow::from(
                                args.iter()
                                    .map(|&a| Ok(a))
                                    .chain(
                                        (0..untuple_arg.layout.fields.count())
                                            .map(|i| self.operand_field(untuple_arg, i)),
                                    )
                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?,
                            )
                        } else {
                            // Plain argument passing.
                            Cow::from(args)
                        };
                    // Skip ZSTs.
                    let mut caller_iter =
                        caller_args.iter().filter(|op| !rust_abi || !op.layout.is_zst()).copied();

                    // Now we have to spread them out across the callee's locals,
                    // taking into account the `spread_arg`. If we could write
                    // this as a single iterator (that handles `spread_arg`), then
                    // `pass_argument` would be the loop body. It takes care to
                    // not advance `caller_iter` for ZSTs.
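                    // For illustration (hypothetical callee): a closure body whose MIR
                    // has `spread_arg = Some(_2)` with `_2: (i32, bool)` receives two
                    // separate caller arguments here, one copied into each tuple field,
                    // rather than a single tuple value.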
                    for local in body.args_iter() {
                        let dest = self.eval_place(mir::Place::from(local))?;
                        if Some(local) == body.spread_arg {
                            // Must be a tuple.
                            for i in 0..dest.layout.fields.count() {
                                let dest = self.place_field(dest, i)?;
                                self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                            }
                        } else {
                            // Normal argument.
                            self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                        }
                    }
                    // Now we should have no more caller args.
                    if caller_iter.next().is_some() {
                        throw_ub_format!("calling a function with more arguments than it expected")
                    }
                    // Don't forget to check the return type!
                    if let Some((caller_ret, _)) = ret {
                        let callee_ret = self.eval_place(mir::Place::return_place())?;
                        if !Self::check_argument_compat(
                            rust_abi,
                            caller_ret.layout,
                            callee_ret.layout,
                        ) {
                            throw_ub_format!(
                                "calling a function with return type {:?} passing \
                                 return place of type {:?}",
                                callee_ret.layout.ty,
                                caller_ret.layout.ty
                            )
                        }
                    } else {
                        let local = mir::RETURN_PLACE;
                        let callee_layout = self.layout_of_local(self.frame(), local, None)?;
                        if !callee_layout.abi.is_uninhabited() {
                            throw_ub_format!("calling a returning function without a return place")
                        }
                    }
                };
                match res {
                    Err(err) => {
                        self.stack_mut().pop();
                        Err(err)
                    }
                    Ok(()) => Ok(()),
                }
            }
            // Cannot use the shim here, because that would only result in infinite recursion.
            ty::InstanceDef::Virtual(_, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "object safe receivers". Currently we
                // support built-in pointers (&, &mut, Box) as well as unsized-self. We do
                // not yet support custom self types.
                // Also see `compiler/rustc_codegen_llvm/src/abi.rs` and `compiler/rustc_codegen_ssa/src/mir/block.rs`.
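                // For illustration (hypothetical call, not part of this module):
                // `<dyn Trait>::method(&obj)` arrives here with `args[0]` of type
                // `&dyn Trait`, a wide pointer; below we look up the concrete method in
                // the vtable and thin `args[0]` down to a pointer to the concrete
                // receiver.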
                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
                    Some(_) => {
                        // Built-in pointer.
                        self.deref_operand(args[0])?
                    }
                    None => {
                        // Unsized self.
                        args[0].assert_mem_place(self)
                    }
                };
                // Find and consult the vtable.
                let vtable = receiver_place.vtable();
                let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
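                // (`idx` counts trait methods starting at 0; `get_vtable_slot` itself
                // skips over the drop-glue, size, and align entries at the front of
                // the vtable.)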

                // `*mut receiver_place.layout.ty` is almost the layout that we
                // want for `args[0]`: we have to project to field 0 because we want
                // a thin pointer.
                assert!(receiver_place.layout.is_unsized());
                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
                // Adjust the receiver argument.
                args[0] =
                    OpTy::from(ImmTy::from_immediate(receiver_place.ptr.into(), this_receiver_ptr));
                trace!("Patched self operand to {:#?}", args[0]);
                // Recurse with the concrete function.
                self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)
            }
        }
    }

    fn drop_in_place(
        &mut self,
        place: PlaceTy<'tcx, M::PointerTag>,
        instance: ty::Instance<'tcx>,
        target: mir::BasicBlock,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        let (instance, place) = match place.layout.ty.kind() {
            ty::Dynamic(..) => {
                // Dropping a trait object.
                self.unpack_dyn_trait(place)?
            }
            _ => (instance, place),
        };
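        // For illustration (hypothetical case): dropping the pointee of a
        // `Box<dyn Any>` lands here with `place.layout.ty == dyn Any`;
        // `unpack_dyn_trait` then replaces `instance` with the concrete type's drop
        // glue and `place` with a thin place for the concrete value.
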
        let arg = ImmTy::from_immediate(
            place.to_ref(),
            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
        );

        let ty = self.tcx.mk_unit(); // Return type is `()`.
        let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);

        self.eval_fn_call(
            FnVal::Instance(instance),
            Abi::Rust,
            &[arg.into()],
            Some((dest.into(), target)),
            unwind,
        )
    }
}