use std::borrow::Cow;

use rustc_ast::ast::InlineAsmOptions;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::Instance;
use rustc_middle::{
    mir,
    ty::{self, Ty},
};
use rustc_target::abi;
use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;

use super::{
    FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
    PlaceTy, Scalar, StackPopCleanup, StackPopUnwind,
};
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub(super) fn eval_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.pop_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),
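            // For illustration: a source-level `match x { 2 => .., 7 => .., _ => .. }`
            // lowers to a single `SwitchInt(x)` terminator listing the values 2 and 7
            // plus an `otherwise` target; the arm below scans the listed values in
            // order and falls back to `otherwise` when none compares equal.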
            SwitchInt { ref discr, ref targets, switch_ty } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);
                assert_eq!(discr.layout.ty, switch_ty);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets.otherwise();

                for (const_int, target) in targets.iter() {
                    // Compare using MIR BinOp::Eq, to also support pointer values.
                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
                    let res = self
                        .overflowing_binary_op(
                            mir::BinOp::Eq,
                            &discr,
                            &ImmTy::from_uint(const_int, discr.layout),
                        )?
                        .0;
                    if res.to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }
            Call {
                ref func,
                ref args,
                destination,
                target,
                ref cleanup,
                from_hir_call: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;
                let func = self.eval_operand(func, None)?;
                let args = self.eval_operands(args)?;

                let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
                let fn_sig =
                    self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
                let extra_args = &args[fn_sig.inputs().len()..];
                let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));

                let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
                    ty::FnPtr(_sig) => {
                        let fn_ptr = self.read_pointer(&func)?;
                        let fn_val = self.get_ptr_fn(fn_ptr)?;
                        (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
                    }
                    ty::FnDef(def_id, substs) => {
                        let instance =
                            self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?;
                        (
                            FnVal::Instance(instance),
                            self.fn_abi_of_instance(instance, extra_args)?,
                            instance.def.requires_caller_location(*self.tcx),
                        )
                    }
                    _ => span_bug!(
                        terminator.source_info.span,
                        "invalid callee of type {:?}",
                        func.layout.ty
                    ),
                };

                let destination = self.eval_place(destination)?;
                self.eval_fn_call(
                    fn_val,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    match (cleanup, fn_abi.can_unwind) {
                        (Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
                        (None, true) => StackPopUnwind::Skip,
                        (_, false) => StackPopUnwind::NotAllowed,
                    },
                )?;
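                // To spell out the mapping above: a call site with a `cleanup` block
                // unwinds into that block; one without a cleanup block skips this
                // frame during unwinding; and if the callee's ABI cannot unwind at
                // all (`fn_abi.can_unwind == false`, typical for `extern "C"`),
                // unwinding out of the call is simply not allowed.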
                // Sanity-check that `eval_fn_call` either pushed a new frame or
                // jumped to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }
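            // For illustration: dropping a local `v: Vec<i32>` produces a `Drop`
            // terminator; the arm below resolves the `drop_in_place::<Vec<i32>>`
            // instance for the place's type and forwards to `drop_in_place`, which
            // performs the actual call to the drop glue.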
            Drop { place, target, unwind } => {
                let place = self.eval_place(place)?;
                let ty = place.layout.ty;
                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);

                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                self.drop_in_place(&place, instance, target, unwind)?;
            }
            Assert { ref cond, expected, ref msg, target, cleanup } => {
                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
                if expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, cleanup)?;
                }
            }

            Abort => {
                M::abort(self, "the program aborted execution".to_owned())?;
            }
            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame.
            Resume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding.
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),
            // These should never occur for MIR we actually run.
            DropAndReplace { .. }
            | FalseEdge { .. }
            | FalseUnwind { .. }
            | Yield { .. }
            | GeneratorDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),
            InlineAsm { template, ref operands, options, destination, .. } => {
                M::eval_inline_asm(self, template, operands, options)?;
                if options.contains(InlineAsmOptions::NORETURN) {
                    throw_ub_format!("returned from noreturn inline assembly");
                }
                self.go_to_block(
                    destination
                        .expect("InlineAsm terminators without noreturn must have a destination"),
                );
            }
        }

        Ok(())
    }
    fn check_argument_compat(
        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    ) -> bool {
        // Heuristic for type comparison.
        let layout_compat = || {
            if caller_abi.layout.ty == callee_abi.layout.ty {
                // Same type: trivially compatible.
                return true;
            }
            if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
                // We require the types to *exactly* match for unsized arguments. If
                // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
                // then who knows what happens.
                return false;
            }
            if caller_abi.layout.size != callee_abi.layout.size
                || caller_abi.layout.align.abi != callee_abi.layout.align.abi
            {
                // This cannot go well...
                return false;
            }
            // The rest *should* be okay, but we are extra conservative.
            match (caller_abi.layout.abi, callee_abi.layout.abi) {
                // Different valid ranges are okay (once we enforce validity,
                // that will take care to make it UB to leave the range, just
                // like for transmute).
                (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
                    caller.primitive() == callee.primitive()
                }
                (
                    abi::Abi::ScalarPair(caller1, caller2),
                    abi::Abi::ScalarPair(callee1, callee2),
                ) => {
                    caller1.primitive() == callee1.primitive()
                        && caller2.primitive() == callee2.primitive()
                }
                // Be conservative.
                _ => false,
            }
        };
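        // For illustration: with the heuristic above, a caller passing `u32` where
        // the callee expects `char` is accepted -- both are `Scalar` values of the
        // same size, alignment, and primitive, differing only in valid range --
        // while `u32` vs `f32` differ in primitive kind and are rejected.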
        // When comparing the PassMode, we have to be smart about comparing the attributes.
        let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
            // There's only one regular attribute that matters for the call ABI: InReg.
            // Everything else is things like noalias, dereferenceable, nonnull, ...
            // (This also applies to pointee_size, pointee_align.)
            if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
            {
                return false;
            }
            // We also compare the sign-extension mode -- a mismatch could let the callee
            // make assumptions about bits that conceptually were not even passed.
            if a1.arg_ext != a2.arg_ext {
                return false;
            }
            return true;
        };
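        // For illustration of the `arg_ext` check: on targets that widen small
        // integer arguments to a full register, an `i8` is passed sign-extended
        // and a `u8` zero-extended; if caller and callee disagreed on this, the
        // callee could rely on high bits the caller never meaningfully set.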
        let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
                arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
            }
            (PassMode::Cast(c1, pad1), PassMode::Cast(c2, pad2)) => c1 == c2 && pad1 == pad2,
            (
                PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
            ) => arg_attr_compat(a1, a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
            ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
            _ => false,
        };
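        // For illustration: a small scalar like `i32` is typically `PassMode::Direct`,
        // while a large aggregate like `[u8; 64]` is passed `PassMode::Indirect`
        // (behind a pointer); distinct modes never compare equal here, and within a
        // mode the attributes still have to agree per `arg_attr_compat`.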
        if layout_compat() && mode_compat() {
            return true;
        }
        trace!(
            "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
            caller_abi,
            callee_abi
        );
        return false;
    }
    /// Initialize a single callee argument, checking the types for compatibility.
    fn pass_argument<'x, 'y>(
        &mut self,
        caller_args: &mut impl Iterator<
            Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
        >,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_arg: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'x,
        'tcx: 'y,
    {
        if matches!(callee_abi.mode, PassMode::Ignore) {
            // This one is skipped.
            return Ok(());
        }
        // Find next caller arg.
        let (caller_arg, caller_abi) = caller_args.next().ok_or_else(|| {
            err_ub_format!("calling a function with fewer arguments than it requires")
        })?;
        // Now check the ABI compatibility of caller and callee.
        if !Self::check_argument_compat(caller_abi, callee_abi) {
            throw_ub_format!(
                "calling a function with argument of type {:?} passing data of type {:?}",
                callee_arg.layout.ty,
                caller_arg.layout.ty
            )
        }
        // Special handling for unsized parameters.
        if caller_arg.layout.is_unsized() {
            // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
            assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
            // We have to properly pre-allocate the memory for the callee.
            // So let's tear down some wrappers.
            // This all has to be in memory; there are no immediate unsized values.
            let src = caller_arg.assert_mem_place();
            // The destination cannot be one of these "spread args".
            let (dest_frame, dest_local) = callee_arg.assert_local();
            // We are just initializing things, so there can't be anything here yet.
            assert!(matches!(
                *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
                Operand::Immediate(Immediate::Uninit)
            ));
            // Allocate enough memory to hold `src`.
            let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
                span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
            };
            let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
            let dest_place =
                MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
            // Update the local to be that new place.
            *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
        }
        // We allow some transmutes here.
        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
        // is true for all `copy_op`, but there are a lot of special cases for argument passing
        // specifically.)
        self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
    }
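    // For illustration of `pass_argument`: for `fn f(x: i32, y: ())`, the `y`
    // argument is `PassMode::Ignore` and consumes no caller operand, while `x` is
    // matched up with the next caller operand, ABI-checked, and copied into the
    // callee's local via `copy_op`.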
    /// Call this function -- pushing the stack frame and initializing the arguments.
    ///
    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
    ///
    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
    /// when those arguments are present.
    pub(crate) fn eval_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[OpTy<'tcx, M::Provenance>],
        with_caller_location: bool,
        destination: &PlaceTy<'tcx, M::Provenance>,
        target: Option<mir::BasicBlock>,
        mut unwind: StackPopUnwind,
    ) -> InterpResult<'tcx> {
        trace!("eval_fn_call: {:#?}", fn_val);

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(
                    self,
                    extra,
                    caller_abi,
                    args,
                    destination,
                    target,
                    unwind,
                );
            }
        };

        match instance.def {
            ty::InstanceDef::Intrinsic(def_id) => {
                assert!(self.tcx.is_intrinsic(def_id));
                // `caller_fn_abi` is not relevant here; we interpret the arguments
                // directly for each intrinsic.
                M::call_intrinsic(self, instance, args, destination, target, unwind)
            }
            ty::InstanceDef::VTableShim(..)
            | ty::InstanceDef::ReifyShim(..)
            | ty::InstanceDef::ClosureOnceShim { .. }
            | ty::InstanceDef::FnPtrShim(..)
            | ty::InstanceDef::DropGlue(..)
            | ty::InstanceDef::CloneShim(..)
            | ty::InstanceDef::Item(_) => {
                // We need MIR for this fn.
                let Some((body, instance)) =
                    M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
                        // The machine handled the call itself; we are done.
                        return Ok(());
                    };
                // Compute callee information using the `instance` returned by
                // `find_mir_or_eval_fn`.
                // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
                let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

                if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
                    throw_unsup_format!("calling a c-variadic function is not supported");
                }
                if M::enforce_abi(self) {
                    if caller_fn_abi.conv != callee_fn_abi.conv {
                        throw_ub_format!(
                            "calling a function with calling convention {:?} using calling convention {:?}",
                            callee_fn_abi.conv,
                            caller_fn_abi.conv
                        )
                    }
                }
                if !matches!(unwind, StackPopUnwind::NotAllowed) && !callee_fn_abi.can_unwind {
                    // The callee cannot unwind.
                    unwind = StackPopUnwind::NotAllowed;
                }
                self.push_stack_frame(
                    instance,
                    body,
                    destination,
                    StackPopCleanup::Goto { ret: target, unwind },
                )?;
                // If an error is raised here, pop the frame again to get an accurate backtrace.
                // To this end, we wrap it all in a `try` block.
                let res: InterpResult<'tcx> = try {
                    trace!(
                        "caller ABI: {:?}, args: {:#?}",
                        caller_abi,
                        args.iter()
                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
                            .collect::<Vec<_>>()
                    );
                    trace!(
                        "spread_arg: {:?}, locals: {:#?}",
                        body.spread_arg,
                        body.args_iter()
                            .map(|local| (
                                local,
                                self.layout_of_local(self.frame(), local, None).unwrap().ty
                            ))
                            .collect::<Vec<_>>()
                    );
                    // In principle, we have two iterators: where the arguments come from, and where
                    // they go to.

                    // For where they come from: if the ABI is RustCall, we untuple the
                    // last incoming argument. These two iterators do not have the same type,
                    // so to keep the code paths uniform we accept an allocation
                    // (for RustCall ABI only).
                    let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
                        if caller_abi == Abi::RustCall && !args.is_empty() {
                            // Untuple the last argument.
                            let (untuple_arg, args) = args.split_last().unwrap();
                            trace!("eval_fn_call: Will pass last argument by untupling");
                            Cow::from(
                                args.iter()
                                    .map(|a| Ok(a.clone()))
                                    .chain(
                                        (0..untuple_arg.layout.fields.count())
                                            .map(|i| self.operand_field(untuple_arg, i)),
                                    )
                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>()?,
                            )
                        } else {
                            // Plain argument passing.
                            Cow::from(args)
                        };
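                    // For illustration: a closure invoked via `Fn::call(&f, (a, b))`
                    // uses `Abi::RustCall`, whose last argument is the tuple `(a, b)`;
                    // the code above spreads that tuple's fields out so they line up
                    // one-to-one with the arguments in the callee's `FnAbi`.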
                    // If `with_caller_location` is set we pretend there is an extra argument (that
                    // we will not pass).
                    assert_eq!(
                        caller_args.len() + if with_caller_location { 1 } else { 0 },
                        caller_fn_abi.args.len(),
                        "mismatch between caller ABI and caller arguments",
                    );
                    let mut caller_args = caller_args
                        .iter()
                        .zip(caller_fn_abi.args.iter())
                        .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
                    // Now we have to spread them out across the callee's locals,
                    // taking into account the `spread_arg`. If we could write
                    // this as a single iterator (that handles `spread_arg`), then
                    // `pass_argument` would be the loop body. It takes care not to
                    // advance `caller_args` for ignored (`PassMode::Ignore`) arguments.
                    let mut callee_args_abis = callee_fn_abi.args.iter();
                    for local in body.args_iter() {
                        let dest = self.eval_place(mir::Place::from(local))?;
                        if Some(local) == body.spread_arg {
                            // Must be a tuple: pass each field as its own argument.
                            for i in 0..dest.layout.fields.count() {
                                let dest = self.place_field(&dest, i)?;
                                let callee_abi = callee_args_abis.next().unwrap();
                                self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                            }
                        } else {
                            // Normal argument.
                            let callee_abi = callee_args_abis.next().unwrap();
                            self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                        }
                    }
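                    // For illustration: `spread_arg` is the callee-side mirror of the
                    // untupling above. An `extern "rust-call"` callee body declares a
                    // single tuple-typed local for its non-self arguments, and each
                    // field of that tuple is filled from a separate caller argument.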
                    // If the callee needs a caller location, pretend we consume one more argument from the ABI.
                    if instance.def.requires_caller_location(*self.tcx) {
                        callee_args_abis.next().unwrap();
                    }
                    // Now we should have no more caller args or callee arg ABIs.
                    assert!(
                        callee_args_abis.next().is_none(),
                        "mismatch between callee ABI and callee body arguments"
                    );
                    if caller_args.next().is_some() {
                        throw_ub_format!("calling a function with more arguments than it expected")
                    }
                    // Don't forget to check the return type!
                    if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
                        throw_ub_format!(
                            "calling a function with return type {:?} passing \
                             return place of type {:?}",
                            callee_fn_abi.ret.layout.ty,
                            caller_fn_abi.ret.layout.ty,
                        )
                    }
                };
                match res {
                    Err(err) => {
                        self.stack_mut().pop();
                        Err(err)
                    }
                    Ok(()) => Ok(()),
                }
            }
            // We cannot use the shim here, because that would only result in
            // infinite recursion.
            ty::InstanceDef::Virtual(def_id, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "object safe receivers". So we have to go search for a
                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
                // unwrap those newtypes until we are there.
                let mut receiver = args[0].clone();
                let receiver_place = loop {
                    match receiver.layout.ty.kind() {
                        ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
                        ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
                        _ => {
                            // Not there yet, search for the only non-ZST field.
                            let mut non_zst_field = None;
                            for i in 0..receiver.layout.fields.count() {
                                let field = self.operand_field(&receiver, i)?;
                                let zst =
                                    field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
                                if !zst {
                                    assert!(
                                        non_zst_field.is_none(),
                                        "multiple non-ZST fields in dyn receiver type {}",
                                        receiver.layout.ty
                                    );
                                    non_zst_field = Some(field);
                                }
                            }
                            receiver = non_zst_field.unwrap_or_else(|| {
                                panic!(
                                    "no non-ZST fields in dyn receiver type {}",
                                    receiver.layout.ty
                                )
                            });
                        }
                    }
                };
                // Obtain the underlying trait we are working on.
                let receiver_tail = self
                    .tcx
                    .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
                let ty::Dynamic(data, ..) = receiver_tail.kind() else {
                    span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
                };
                // Get the required information from the vtable.
                let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
                let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
                if dyn_trait != data.principal() {
                    throw_ub_format!(
                        "`dyn` call on a pointer whose vtable does not match its type"
                    );
                }
                // Now determine the actual method to call. We can do that in two different ways and
                // compare them to ensure everything fits.
                let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
                    throw_ub_format!("`dyn` call trying to call something that is not a method")
                };
                if cfg!(debug_assertions) {
                    let tcx = *self.tcx;

                    let trait_def_id = tcx.trait_of_item(def_id).unwrap();
                    let virtual_trait_ref =
                        ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
                    assert_eq!(
                        receiver_tail,
                        virtual_trait_ref.self_ty(),
                        "mismatch in underlying dyn trait computation within Miri and MIR building",
                    );
                    let existential_trait_ref =
                        ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
                    let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);

                    let concrete_method = Instance::resolve_for_vtable(
                        tcx,
                        self.param_env,
                        def_id,
                        instance.substs.rebase_onto(tcx, trait_def_id, concrete_trait_ref.substs),
                    )
                    .unwrap();
                    assert_eq!(fn_inst, concrete_method);
                }
                // `*mut receiver_place.layout.ty` is almost the layout that we
                // want for args[0]: We have to project to field 0 because we want
                // a thin pointer.
                assert!(receiver_place.layout.is_unsized());
                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
                // Adjust receiver argument.
                args[0] = OpTy::from(ImmTy::from_immediate(
                    Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
                    this_receiver_ptr,
                ));
                trace!("Patched receiver operand to {:#?}", args[0]);
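                // For illustration: `*mut dyn Trait` is a wide pointer (data pointer
                // plus vtable pointer); field 0 of its layout is the layout of just
                // the data half, which is exactly the thin `self` pointer the
                // concrete method expects.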
                // Recurse with the concrete function instance.
                self.eval_fn_call(
                    FnVal::Instance(fn_inst),
                    (caller_abi, caller_fn_abi),
                    &args,
                    with_caller_location,
                    destination,
                    target,
                    unwind,
                )
            }
        }
    }
    fn drop_in_place(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
        instance: ty::Instance<'tcx>,
        target: mir::BasicBlock,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        let (instance, place) = match place.layout.ty.kind() {
            ty::Dynamic(..) => {
                // Dropping a trait object. Need to find the actual drop fn.
                let place = self.unpack_dyn_trait(&place)?;
                let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
                (instance, place)
            }
            _ => (instance, place),
        };
        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        let arg = ImmTy::from_immediate(
            place.to_ref(self),
            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
        );
        let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
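        // For illustration: drop glue has the shape of `ptr::drop_in_place` -- one
        // `*mut T` argument (built above via `place.to_ref`) and a unit return
        // value, so a fake zero-sized allocation suffices as the return place.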
        self.eval_fn_call(
            FnVal::Instance(instance),
            (Abi::Rust, fn_abi),
            &[arg.into()],
            false,
            &ret.into(),
            Some(target),
            match unwind {
                Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
                None => StackPopUnwind::Skip,
            },
        )
    }
}