1 //! Inlining pass for MIR functions
2 use crate::deref_separator::deref_finder;
3 use rustc_attr::InlineAttr;
4 use rustc_const_eval::transform::validate::equal_up_to_regions;
5 use rustc_index::bit_set::BitSet;
6 use rustc_index::vec::Idx;
7 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
8 use rustc_middle::mir::visit::*;
9 use rustc_middle::mir::*;
10 use rustc_middle::ty::subst::Subst;
11 use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
12 use rustc_session::config::OptLevel;
13 use rustc_span::def_id::DefId;
14 use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
15 use rustc_target::abi::VariantIdx;
16 use rustc_target::spec::abi::Abi;
18 use super::simplify::{remove_dead_blocks, CfgSimplifier};
21 use std::ops::{Range, RangeFrom};
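// Abstract cost units used by `CostChecker` below; the accumulated cost is compared
// against the `inline_mir_threshold` / `inline_mir_hint_threshold` options.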
25 const INSTR_COST: usize = 5;
26 const CALL_PENALTY: usize = 25;
27 const LANDINGPAD_PENALTY: usize = 50;
28 const RESUME_PENALTY: usize = 45;
30 const UNKNOWN_SIZE_COST: usize = 10;
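/// A resolved direct call that is a candidate for inlining.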
34 #[derive(Copy, Clone, Debug)]
35 struct CallSite<'tcx> {
36 callee: Instance<'tcx>,
37 fn_sig: ty::PolyFnSig<'tcx>,
39 target: Option<BasicBlock>,
40 source_info: SourceInfo,
43 impl<'tcx> MirPass<'tcx> for Inline {
44 fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
45 if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
match sess.mir_opt_level() {
    0 | 1 => false,
    2 => {
        (sess.opts.optimize == OptLevel::Default
            || sess.opts.optimize == OptLevel::Aggressive)
            && sess.opts.incremental == None
    }
    _ => true,
}
60 fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
61 let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
62 let _guard = span.enter();
63 if inline(tcx, body) {
64 debug!("running simplify cfg on {:?}", body.source);
65 CfgSimplifier::new(body).simplify();
66 remove_dead_blocks(tcx, body);
67 deref_finder(tcx, body);
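/// Attempts inlining within `body`, returning `true` if any call was inlined so that
/// `run_pass` can re-simplify the CFG afterwards.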
72 fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
73 let def_id = body.source.def_id().expect_local();
75 // Only do inlining into fn bodies.
76 if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
79 if body.source.promoted.is_some() {
82 // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
// which can create a cycle, even when no attempt is made to inline the function in the other
// direction.
85 if body.generator.is_some() {
89 let param_env = tcx.param_env_reveal_all_normalized(def_id);
91 let mut this = Inliner {
94 codegen_fn_attrs: tcx.codegen_fn_attrs(def_id),
98 let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
99 this.process_blocks(body, blocks);
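/// Per-body state used while inlining calls into a single caller.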
103 struct Inliner<'tcx> {
105 param_env: ParamEnv<'tcx>,
106 /// Caller codegen attributes.
107 codegen_fn_attrs: &'tcx CodegenFnAttrs,
108 /// Stack of inlined instances.
109 /// We only check the `DefId` and not the substs because we want to
110 /// avoid inlining cases of polymorphic recursion.
111 /// The number of `DefId`s is finite, so checking history is enough
112 /// to ensure that we do not loop endlessly while inlining.
114 /// Indicates that the caller body has been modified.
118 impl<'tcx> Inliner<'tcx> {
119 fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
121 let bb_data = &caller_body[bb];
122 if bb_data.is_cleanup {
126 let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else {
130 let span = trace_span!("process_blocks", %callsite.callee, ?bb);
131 let _guard = span.enter();
133 match self.try_inlining(caller_body, &callsite) {
135 debug!("not-inlined {} [{}]", callsite.callee, reason);
139 debug!("inlined {}", callsite.callee);
141 self.history.push(callsite.callee.def_id());
142 self.process_blocks(caller_body, new_blocks);
/// Attempts to inline a callsite into the caller body. When successful, returns the basic blocks
/// containing the inlined body. Otherwise returns an error describing why inlining didn't take
/// place.
154 caller_body: &mut Body<'tcx>,
155 callsite: &CallSite<'tcx>,
156 ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
157 let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
158 self.check_codegen_attributes(callsite, callee_attrs)?;
159 self.check_mir_is_available(caller_body, &callsite.callee)?;
160 let callee_body = self.tcx.instance_mir(callsite.callee.def);
161 self.check_mir_body(callsite, callee_body, callee_attrs)?;
163 if !self.tcx.consider_optimizing(|| {
164 format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
166 return Err("optimization fuel exhausted");
169 let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions(
174 return Err("failed to normalize callee body");
177 // Check call signature compatibility.
178 // Normally, this shouldn't be required, but trait normalization failure can create a
180 let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
181 let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
182 let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty;
183 let output_type = callee_body.return_ty();
184 if !equal_up_to_regions(self.tcx, self.param_env, output_type, destination_ty) {
185 trace!(?output_type, ?destination_ty);
186 return Err("failed to normalize return type");
188 if callsite.fn_sig.abi() == Abi::RustCall {
189 let (arg_tuple, skipped_args) = match &args[..] {
190 [arg_tuple] => (arg_tuple, 0),
191 [_, arg_tuple] => (arg_tuple, 1),
192 _ => bug!("Expected `rust-call` to have 1 or 2 args"),
195 let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
196 let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
197 bug!("Closure arguments are not passed as a tuple");
200 for (arg_ty, input) in
201 arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
203 let input_type = callee_body.local_decls[input].ty;
204 if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
205 trace!(?arg_ty, ?input_type);
206 return Err("failed to normalize tuple argument type");
210 for (arg, input) in args.iter().zip(callee_body.args_iter()) {
211 let input_type = callee_body.local_decls[input].ty;
212 let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
213 if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
214 trace!(?arg_ty, ?input_type);
215 return Err("failed to normalize argument type");
220 let old_blocks = caller_body.basic_blocks.next_index();
221 self.inline_call(caller_body, &callsite, callee_body);
222 let new_blocks = old_blocks..caller_body.basic_blocks.next_index();
227 fn check_mir_is_available(
229 caller_body: &Body<'tcx>,
230 callee: &Instance<'tcx>,
231 ) -> Result<(), &'static str> {
232 let caller_def_id = caller_body.source.def_id();
233 let callee_def_id = callee.def_id();
234 if callee_def_id == caller_def_id {
235 return Err("self-recursion");
239 InstanceDef::Item(_) => {
// If there is no MIR available (either because it was not in metadata or
// because it's an extern function with no MIR body), then the inliner
// won't cause cycles on this.
243 if !self.tcx.is_mir_available(callee_def_id) {
244 return Err("item MIR unavailable");
// These have no callable MIR of their own.
248 InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
249 return Err("instance without MIR (intrinsic / virtual)");
251 // This cannot result in an immediate cycle since the callee MIR is a shim, which does
252 // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
// do not need to catch this here; we can wait until the inliner decides to continue
254 // inlining a second time.
255 InstanceDef::VTableShim(_)
256 | InstanceDef::ReifyShim(_)
257 | InstanceDef::FnPtrShim(..)
258 | InstanceDef::ClosureOnceShim { .. }
259 | InstanceDef::DropGlue(..)
260 | InstanceDef::CloneShim(..) => return Ok(()),
263 if self.tcx.is_constructor(callee_def_id) {
264 trace!("constructors always have MIR");
265 // Constructor functions cannot cause a query cycle.
269 if callee_def_id.is_local() {
// Avoid a cycle here by using `instance_mir` only if we have
271 // a lower `DefPathHash` than the callee. This ensures that the callee will
272 // not inline us. This trick even works with incremental compilation,
273 // since `DefPathHash` is stable.
274 if self.tcx.def_path_hash(caller_def_id).local_hash()
275 < self.tcx.def_path_hash(callee_def_id).local_hash()
280 // If we know for sure that the function we're calling will itself try to
281 // call us, then we avoid inlining that function.
282 if self.tcx.mir_callgraph_reachable((*callee, caller_def_id.expect_local())) {
283 return Err("caller might be reachable from callee (query cycle avoidance)");
288 // This cannot result in an immediate cycle since the callee MIR is from another crate
289 // and is already optimized. Any subsequent inlining may cause cycles, but we do
// not need to catch this here; we can wait until the inliner decides to continue
291 // inlining a second time.
292 trace!("functions from other crates always have MIR");
299 caller_body: &Body<'tcx>,
301 bb_data: &BasicBlockData<'tcx>,
302 ) -> Option<CallSite<'tcx>> {
303 // Only consider direct calls to functions
304 let terminator = bb_data.terminator();
305 if let TerminatorKind::Call { ref func, target, .. } = terminator.kind {
306 let func_ty = func.ty(caller_body, self.tcx);
307 if let ty::FnDef(def_id, substs) = *func_ty.kind() {
// To resolve an instance, its substs have to be fully normalized.
309 let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
311 Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;
313 if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
317 if self.history.contains(&callee.def_id()) {
321 let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, substs);
323 return Some(CallSite {
328 source_info: terminator.source_info,
336 /// Returns an error if inlining is not possible based on codegen attributes alone. A success
/// indicates that the inlining decision should be based on other criteria.
338 fn check_codegen_attributes(
340 callsite: &CallSite<'tcx>,
341 callee_attrs: &CodegenFnAttrs,
342 ) -> Result<(), &'static str> {
343 match callee_attrs.inline {
344 InlineAttr::Never => return Err("never inline hint"),
345 InlineAttr::Always | InlineAttr::Hint => {}
346 InlineAttr::None => {
347 if self.tcx.sess.mir_opt_level() <= 2 {
348 return Err("at mir-opt-level=2, only #[inline] is inlined");
353 // Only inline local functions if they would be eligible for cross-crate
354 // inlining. This is to ensure that the final crate doesn't have MIR that
// references unexported symbols.
356 if callsite.callee.def_id().is_local() {
357 let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
358 if !is_generic && !callee_attrs.requests_inline() {
359 return Err("not exported");
363 if callsite.fn_sig.c_variadic() {
364 return Err("C variadic");
367 if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
371 if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
375 if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
376 return Err("incompatible sanitizer set");
379 if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
380 return Err("incompatible instruction set");
383 for feature in &callee_attrs.target_features {
384 if !self.codegen_fn_attrs.target_features.contains(feature) {
385 return Err("incompatible target feature");
/// Returns the inlining decision based on an examination of the callee's MIR body.
393 /// Assumes that codegen attributes have been checked for compatibility already.
394 #[instrument(level = "debug", skip(self, callee_body))]
397 callsite: &CallSite<'tcx>,
398 callee_body: &Body<'tcx>,
399 callee_attrs: &CodegenFnAttrs,
400 ) -> Result<(), &'static str> {
403 let mut threshold = if callee_attrs.requests_inline() {
404 self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
406 self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
// Give a bonus to functions with a small number of blocks; we normally
// have two or three blocks for even very small functions.
412 if callee_body.basic_blocks.len() <= 3 {
413 threshold += threshold / 4;
415 debug!(" final inline threshold = {}", threshold);
417 // FIXME: Give a bonus to functions with only a single caller
418 let diverges = matches!(
419 callee_body.basic_blocks[START_BLOCK].terminator().kind,
420 TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
422 if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
423 return Err("callee diverges unconditionally");
426 let mut checker = CostChecker {
428 param_env: self.param_env,
429 instance: callsite.callee,
435 // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
436 let mut work_list = vec![START_BLOCK];
437 let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
438 while let Some(bb) = work_list.pop() {
439 if !visited.insert(bb.index()) {
443 let blk = &callee_body.basic_blocks[bb];
444 checker.visit_basic_block_data(bb, blk);
446 let term = blk.terminator();
447 if let TerminatorKind::Drop { ref place, target, unwind }
448 | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
450 work_list.push(target);
452 // If the place doesn't actually need dropping, treat it like a regular goto.
453 let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
454 if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
455 work_list.push(unwind);
458 work_list.extend(term.successors())
// Count up the cost of local variables and temps; if we know the size,
// use that, otherwise use a moderately large dummy cost.
464 for v in callee_body.vars_and_temps_iter() {
465 checker.visit_local_decl(v, &callee_body.local_decls[v]);
468 // Abort if type validation found anything fishy.
471 let cost = checker.cost;
472 if let InlineAttr::Always = callee_attrs.inline {
473 debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
475 } else if cost <= threshold {
476 debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
479 debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
480 Err("cost above threshold")
486 caller_body: &mut Body<'tcx>,
487 callsite: &CallSite<'tcx>,
488 mut callee_body: Body<'tcx>,
490 let terminator = caller_body[callsite.block].terminator.take().unwrap();
491 match terminator.kind {
492 TerminatorKind::Call { args, destination, cleanup, .. } => {
493 // If the call is something like `a[*i] = f(i)`, where
494 // `i : &mut usize`, then just duplicating the `a[*i]`
495 // Place could result in two different locations if `f`
496 // writes to `i`. To prevent this we need to create a temporary
497 // borrow of the place and pass the destination as `*temp` instead.
498 fn dest_needs_borrow(place: Place<'_>) -> bool {
499 for elem in place.projection.iter() {
501 ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
509 let dest = if dest_needs_borrow(destination) {
510 trace!("creating temp for return destination");
511 let dest = Rvalue::Ref(
512 self.tcx.lifetimes.re_erased,
513 BorrowKind::Mut { allow_two_phase_borrow: false },
516 let dest_ty = dest.ty(caller_body, self.tcx);
517 let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
518 caller_body[callsite.block].statements.push(Statement {
519 source_info: callsite.source_info,
520 kind: StatementKind::Assign(Box::new((temp, dest))),
522 self.tcx.mk_place_deref(temp)
527 // Copy the arguments if needed.
528 let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);
530 let mut expn_data = ExpnData::default(
532 callsite.source_info.span,
533 self.tcx.sess.edition(),
537 expn_data.def_site = callee_body.span;
539 self.tcx.with_stable_hashing_context(|hcx| LocalExpnId::fresh(expn_data, hcx));
540 let mut integrator = Integrator {
542 new_locals: Local::new(caller_body.local_decls.len())..,
543 new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
544 new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
546 callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
548 cleanup_block: cleanup,
549 in_cleanup_block: false,
552 always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
555 // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
556 // (or existing ones, in a few special cases) in the caller.
557 integrator.visit_body(&mut callee_body);
559 // If there are any locals without storage markers, give them storage only for the
560 // duration of the call.
561 for local in callee_body.vars_and_temps_iter() {
562 if !callee_body.local_decls[local].internal
563 && integrator.always_live_locals.contains(local)
565 let new_local = integrator.map_local(local);
566 caller_body[callsite.block].statements.push(Statement {
567 source_info: callsite.source_info,
568 kind: StatementKind::StorageLive(new_local),
572 if let Some(block) = callsite.target {
// To avoid repeated O(n) inserts, push any new statements to the end and rotate
// the slice once.
576 for local in callee_body.vars_and_temps_iter().rev() {
577 if !callee_body.local_decls[local].internal
578 && integrator.always_live_locals.contains(local)
580 let new_local = integrator.map_local(local);
581 caller_body[block].statements.push(Statement {
582 source_info: callsite.source_info,
583 kind: StatementKind::StorageDead(new_local),
588 caller_body[block].statements.rotate_right(n);
591 // Insert all of the (mapped) parts of the callee body into the caller.
592 caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
593 caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
594 caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
595 caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
597 caller_body[callsite.block].terminator = Some(Terminator {
598 source_info: callsite.source_info,
599 kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
602 // Copy only unevaluated constants from the callee_body into the caller_body.
603 // Although we are only pushing `ConstKind::Unevaluated` consts to
604 // `required_consts`, here we may not only have `ConstKind::Unevaluated`
605 // because we are calling `subst_and_normalize_erasing_regions`.
606 caller_body.required_consts.extend(
607 callee_body.required_consts.iter().copied().filter(|&ct| {
608 match ct.literal.const_for_ty() {
Some(ct) => matches!(ct.kind(), ConstKind::Unevaluated(_)),
None => true,
615 kind => bug!("unexpected terminator kind {:?}", kind),
621 args: Vec<Operand<'tcx>>,
622 callsite: &CallSite<'tcx>,
623 caller_body: &mut Body<'tcx>,
624 callee_body: &Body<'tcx>,
628 // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
629 // The caller provides the arguments wrapped up in a tuple:
631 // tuple_tmp = (a, b, c)
632 // Fn::call(closure_ref, tuple_tmp)
634 // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
635 // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
636 // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
639 // [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
641 // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
642 // if we "spill" that into *another* temporary, so that we can map the argument
643 // variable in the callee MIR directly to an argument variable on our side.
644 // So we introduce temporaries like:
646 // tmp0 = tuple_tmp.0
647 // tmp1 = tuple_tmp.1
648 // tmp2 = tuple_tmp.2
650 // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
651 if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
652 let mut args = args.into_iter();
653 let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
654 let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
655 assert!(args.next().is_none());
657 let tuple = Place::from(tuple);
658 let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
659 bug!("Closure arguments are not passed as a tuple");
662 // The `closure_ref` in our example above.
663 let closure_ref_arg = iter::once(self_);
665 // The `tmp0`, `tmp1`, and `tmp2` in our example above.
666 let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
667 // This is e.g., `tuple_tmp.0` in our example above.
668 let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));
670 // Spill to a local to make e.g., `tmp0`.
671 self.create_temp_if_necessary(tuple_field, callsite, caller_body)
674 closure_ref_arg.chain(tuple_tmp_args).collect()
677 .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
682 /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
683 /// temporary `T` and an instruction `T = arg`, and returns `T`.
684 fn create_temp_if_necessary(
687 callsite: &CallSite<'tcx>,
688 caller_body: &mut Body<'tcx>,
690 // Reuse the operand if it is a moved temporary.
691 if let Operand::Move(place) = &arg
692 && let Some(local) = place.as_local()
693 && caller_body.local_kind(local) == LocalKind::Temp
698 // Otherwise, create a temporary for the argument.
699 trace!("creating temp for argument {:?}", arg);
700 let arg_ty = arg.ty(caller_body, self.tcx);
701 let local = self.new_call_temp(caller_body, callsite, arg_ty);
702 caller_body[callsite.block].statements.push(Statement {
703 source_info: callsite.source_info,
704 kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
709 /// Introduces a new temporary into the caller body that is live for the duration of the call.
712 caller_body: &mut Body<'tcx>,
713 callsite: &CallSite<'tcx>,
716 let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));
718 caller_body[callsite.block].statements.push(Statement {
719 source_info: callsite.source_info,
720 kind: StatementKind::StorageLive(local),
723 if let Some(block) = callsite.target {
724 caller_body[block].statements.insert(
727 source_info: callsite.source_info,
728 kind: StatementKind::StorageDead(local),
737 fn type_size_of<'tcx>(
739 param_env: ty::ParamEnv<'tcx>,
742 tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
745 /// Verify that the callee body is compatible with the caller.
747 /// This visitor mostly computes the inlining cost,
748 /// but also needs to verify that types match because of normalization failure.
749 struct CostChecker<'b, 'tcx> {
751 param_env: ParamEnv<'tcx>,
753 callee_body: &'b Body<'tcx>,
754 instance: ty::Instance<'tcx>,
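/// Set to `Err` if a type mismatch (i.e. a normalization failure) is detected in the callee body.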
755 validation: Result<(), &'static str>,
758 impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
759 fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
760 // Don't count StorageLive/StorageDead in the inlining cost.
761 match statement.kind {
762 StatementKind::StorageLive(_)
763 | StatementKind::StorageDead(_)
764 | StatementKind::Deinit(_)
765 | StatementKind::Nop => {}
766 _ => self.cost += INSTR_COST,
769 self.super_statement(statement, location);
772 fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
774 match terminator.kind {
775 TerminatorKind::Drop { ref place, unwind, .. }
776 | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
777 // If the place doesn't actually need dropping, treat it like a regular goto.
778 let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
779 if ty.needs_drop(tcx, self.param_env) {
780 self.cost += CALL_PENALTY;
781 if unwind.is_some() {
782 self.cost += LANDINGPAD_PENALTY;
785 self.cost += INSTR_COST;
788 TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
789 let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
790 self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
// Don't give intrinsics the extra penalty for calls.
INSTR_COST
} else {
    CALL_PENALTY
};
796 if cleanup.is_some() {
797 self.cost += LANDINGPAD_PENALTY;
800 TerminatorKind::Assert { cleanup, .. } => {
801 self.cost += CALL_PENALTY;
802 if cleanup.is_some() {
803 self.cost += LANDINGPAD_PENALTY;
806 TerminatorKind::Resume => self.cost += RESUME_PENALTY,
807 TerminatorKind::InlineAsm { cleanup, .. } => {
808 self.cost += INSTR_COST;
809 if cleanup.is_some() {
810 self.cost += LANDINGPAD_PENALTY;
813 _ => self.cost += INSTR_COST,
816 self.super_terminator(terminator, location);
/// Count up the cost of local variables and temps; if we know the size,
/// use that, otherwise use a moderately large dummy cost.
821 fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
823 let ptr_size = tcx.data_layout.pointer_size.bytes();
825 let ty = self.instance.subst_mir(tcx, &local_decl.ty);
// Cost of the var is the size in machine-words, if we know it.
828 if let Some(size) = type_size_of(tcx, self.param_env, ty) {
829 self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
831 self.cost += UNKNOWN_SIZE_COST;
834 self.super_local_decl(local, local_decl)
837 /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
838 /// to normalization failure.
839 fn visit_projection_elem(
842 proj_base: &[PlaceElem<'tcx>],
843 elem: PlaceElem<'tcx>,
844 context: PlaceContext,
847 if let ProjectionElem::Field(f, ty) = elem {
848 let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
849 let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
850 let check_equal = |this: &mut Self, f_ty| {
851 if !equal_up_to_regions(this.tcx, this.param_env, ty, f_ty) {
853 this.validation = Err("failed to normalize projection type");
858 let kind = match parent_ty.ty.kind() {
859 &ty::Opaque(def_id, substs) => {
860 self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
866 ty::Tuple(fields) => {
867 let Some(f_ty) = fields.get(f.as_usize()) else {
868 self.validation = Err("malformed MIR");
871 check_equal(self, *f_ty);
873 ty::Adt(adt_def, substs) => {
874 let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
875 let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
876 self.validation = Err("malformed MIR");
879 check_equal(self, field.ty(self.tcx, substs));
881 ty::Closure(_, substs) => {
882 let substs = substs.as_closure();
883 let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
884 self.validation = Err("malformed MIR");
887 check_equal(self, f_ty);
889 &ty::Generator(def_id, substs, _) => {
890 let f_ty = if let Some(var) = parent_ty.variant_index {
891 let gen_body = if def_id == self.callee_body.source.def_id() {
self.callee_body
} else {
    self.tcx.optimized_mir(def_id)
};
897 let Some(layout) = gen_body.generator_layout() else {
898 self.validation = Err("malformed MIR");
902 let Some(&local) = layout.variant_fields[var].get(f) else {
903 self.validation = Err("malformed MIR");
907 let Some(&f_ty) = layout.field_tys.get(local) else {
908 self.validation = Err("malformed MIR");
914 let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
915 self.validation = Err("malformed MIR");
922 check_equal(self, f_ty);
924 _ => self.validation = Err("malformed MIR"),
928 self.super_projection_elem(local, proj_base, elem, context, location);
935 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * statements.
939 struct Integrator<'a, 'tcx> {
941 new_locals: RangeFrom<Local>,
942 new_scopes: RangeFrom<SourceScope>,
943 new_blocks: RangeFrom<BasicBlock>,
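/// Place in the caller that receives the callee's return value; the callee's
/// `RETURN_PLACE` is remapped onto it.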
944 destination: Place<'tcx>,
945 callsite_scope: SourceScopeData<'tcx>,
946 callsite: &'a CallSite<'tcx>,
947 cleanup_block: Option<BasicBlock>,
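/// Whether the block currently being visited is itself a cleanup block, in which
/// case its terminators must not be given new unwind edges.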
948 in_cleanup_block: bool,
950 expn_data: LocalExpnId,
951 always_live_locals: BitSet<Local>,
954 impl Integrator<'_, '_> {
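// Callee locals, scopes and blocks are appended after the caller's existing ones, so
// remapping them is a fixed offset; only the return place and the call arguments need
// special handling (see `map_local`).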
955 fn map_local(&self, local: Local) -> Local {
956 let new = if local == RETURN_PLACE {
957 self.destination.local
} else {
    let idx = local.index() - 1;
    if idx < self.args.len() {
        self.args[idx]
    } else {
        Local::new(self.new_locals.start.index() + (idx - self.args.len()))
    }
};
966 trace!("mapping local `{:?}` to `{:?}`", local, new);
970 fn map_scope(&self, scope: SourceScope) -> SourceScope {
971 let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
972 trace!("mapping scope `{:?}` to `{:?}`", scope, new);
976 fn map_block(&self, block: BasicBlock) -> BasicBlock {
977 let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
978 trace!("mapping block `{:?}` to `{:?}`", block, new);
983 impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
984 fn tcx(&self) -> TyCtxt<'tcx> {
988 fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
989 *local = self.map_local(*local);
992 fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
993 self.super_source_scope_data(scope_data);
994 if scope_data.parent_scope.is_none() {
995 // Attach the outermost callee scope as a child of the callsite
996 // scope, via the `parent_scope` and `inlined_parent_scope` chains.
997 scope_data.parent_scope = Some(self.callsite.source_info.scope);
998 assert_eq!(scope_data.inlined_parent_scope, None);
999 scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
1000 Some(self.callsite.source_info.scope)
1002 self.callsite_scope.inlined_parent_scope
1005 // Mark the outermost callee scope as an inlined one.
1006 assert_eq!(scope_data.inlined, None);
1007 scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
1008 } else if scope_data.inlined_parent_scope.is_none() {
1009 // Make it easy to find the scope with `inlined` set above.
1010 scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
1014 fn visit_source_scope(&mut self, scope: &mut SourceScope) {
1015 *scope = self.map_scope(*scope);
1018 fn visit_span(&mut self, span: &mut Span) {
1019 // Make sure that all spans track the fact that they were inlined.
1020 *span = span.fresh_expansion(self.expn_data);
1023 fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
1024 for elem in place.projection {
1025 // FIXME: Make sure that return place is not used in an indexing projection, since it
1026 // won't be rebased as it is supposed to be.
1027 assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
1030 // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
1031 let dest_proj_len = self.destination.projection.len();
1032 if place.local == RETURN_PLACE && dest_proj_len > 0 {
1033 let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
1034 projs.extend(self.destination.projection);
1035 projs.extend(place.projection);
1037 place.projection = self.tcx.intern_place_elems(&*projs);
1039 // Handles integrating any locals that occur in the base
1041 self.super_place(place, context, location)
1044 fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
1045 self.in_cleanup_block = data.is_cleanup;
1046 self.super_basic_block_data(block, data);
1047 self.in_cleanup_block = false;
1050 fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
1051 self.super_retag(kind, place, loc);
1053 // We have to patch all inlined retags to be aware that they are no longer
1054 // happening on function entry.
1055 if *kind == RetagKind::FnEntry {
1056 *kind = RetagKind::Default;
1060 fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
1061 if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
1064 self.always_live_locals.remove(local);
1066 self.super_statement(statement, location);
1069 fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
1070 // Don't try to modify the implicit `_0` access on return (`return` terminators are
1071 // replaced down below anyways).
1072 if !matches!(terminator.kind, TerminatorKind::Return) {
1073 self.super_terminator(terminator, loc);
1076 match terminator.kind {
1077 TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
1078 TerminatorKind::Goto { ref mut target } => {
1079 *target = self.map_block(*target);
1081 TerminatorKind::SwitchInt { ref mut targets, .. } => {
1082 for tgt in targets.all_targets_mut() {
1083 *tgt = self.map_block(*tgt);
1086 TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
1087 | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
1088 *target = self.map_block(*target);
1089 if let Some(tgt) = *unwind {
1090 *unwind = Some(self.map_block(tgt));
1091 } else if !self.in_cleanup_block {
1092 // Unless this drop is in a cleanup block, add an unwind edge to
1093 // the original call's cleanup block
1094 *unwind = self.cleanup_block;
1097 TerminatorKind::Call { ref mut target, ref mut cleanup, .. } => {
1098 if let Some(ref mut tgt) = *target {
1099 *tgt = self.map_block(*tgt);
1101 if let Some(tgt) = *cleanup {
1102 *cleanup = Some(self.map_block(tgt));
1103 } else if !self.in_cleanup_block {
1104 // Unless this call is in a cleanup block, add an unwind edge to
1105 // the original call's cleanup block
1106 *cleanup = self.cleanup_block;
1109 TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
1110 *target = self.map_block(*target);
1111 if let Some(tgt) = *cleanup {
1112 *cleanup = Some(self.map_block(tgt));
1113 } else if !self.in_cleanup_block {
1114 // Unless this assert is in a cleanup block, add an unwind edge to
1115 // the original call's cleanup block
1116 *cleanup = self.cleanup_block;
1119 TerminatorKind::Return => {
1120 terminator.kind = if let Some(tgt) = self.callsite.target {
1121 TerminatorKind::Goto { target: tgt }
1123 TerminatorKind::Unreachable
1126 TerminatorKind::Resume => {
1127 if let Some(tgt) = self.cleanup_block {
1128 terminator.kind = TerminatorKind::Goto { target: tgt }
1131 TerminatorKind::Abort => {}
1132 TerminatorKind::Unreachable => {}
1133 TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
1134 *real_target = self.map_block(*real_target);
1135 *imaginary_target = self.map_block(*imaginary_target);
1137 TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
1138 // see the ordering of passes in the optimized_mir query.
1140 bug!("False unwinds should have been removed before inlining")
1142 TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
1143 if let Some(ref mut tgt) = *destination {
1144 *tgt = self.map_block(*tgt);
1145 } else if !self.in_cleanup_block {
1146 // Unless this inline asm is in a cleanup block, add an unwind edge to
1147 // the original call's cleanup block
1148 *cleanup = self.cleanup_block;