//! Inlining pass for MIR functions

use rustc_attr::InlineAttr;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::MirPass;

use std::iter;
use std::ops::{Range, RangeFrom};

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
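
// Illustrative arithmetic (not part of the pass): with the default threshold
// of 50 used in `check_mir_body`, a callee with two ordinary statements and
// one non-intrinsic call costs 2 * INSTR_COST + CALL_PENALTY = 35 and would
// be inlined, while an additional landing pad (LANDINGPAD_PENALTY) would push
// the cost to 85 and block inlining.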

pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    target: Option<BasicBlock>,
    source_info: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        if let Some(enabled) = sess.opts.debugging_opts.inline_mir {
            return enabled;
        }

        sess.opts.mir_opt_level() >= 3
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            CfgSimplifier::new(body).simplify();
            remove_dead_blocks(tcx, body);
        }
    }
}

fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id().expect_local();

    // Only do inlining into fn bodies.
    if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
        return false;
    }
    if body.source.promoted.is_some() {
        return false;
    }
    // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
    // which can create a cycle, even when no attempt is made to inline the function in the other
    // direction.
    if body.generator.is_some() {
        return false;
    }

    let param_env = tcx.param_env_reveal_all_normalized(def_id);
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
    let param_env = rustc_trait_selection::traits::normalize_param_env_or_error(
        tcx,
        def_id.to_def_id(),
        param_env,
        ObligationCause::misc(body.span, hir_id),
    );

    let mut this = Inliner {
        tcx,
        param_env,
        codegen_fn_attrs: tcx.codegen_fn_attrs(def_id),
        history: Vec::new(),
        changed: false,
    };
    let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
    this.process_blocks(body, blocks);
    this.changed
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    /// Caller codegen attributes.
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
    /// Stack of inlined Instances.
    history: Vec<ty::Instance<'tcx>>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl<'tcx> Inliner<'tcx> {
    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
        for bb in blocks {
            let bb_data = &caller_body[bb];
            if bb_data.is_cleanup {
                continue;
            }

            let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else {
                continue;
            };

            let span = trace_span!("process_blocks", %callsite.callee, ?bb);
            let _guard = span.enter();

            match self.try_inlining(caller_body, &callsite) {
                Err(reason) => {
                    debug!("not-inlined {} [{}]", callsite.callee, reason);
                    continue;
                }
                Ok(new_blocks) => {
                    debug!("inlined {}", callsite.callee);
                    self.changed = true;
                    self.history.push(callsite.callee);
                    self.process_blocks(caller_body, new_blocks);
                    self.history.pop();
                }
            }
        }
    }

    /// Attempts to inline a callsite into the caller body. When successful returns basic blocks
    /// containing the inlined body. Otherwise returns an error describing why inlining didn't take
    /// place.
    fn try_inlining(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
    ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
        let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
        self.check_codegen_attributes(callsite, callee_attrs)?;
        self.check_mir_is_available(caller_body, &callsite.callee)?;
        let callee_body = self.tcx.instance_mir(callsite.callee.def);
        self.check_mir_body(callsite, callee_body, callee_attrs)?;

        if !self.tcx.consider_optimizing(|| {
            format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
        }) {
            return Err("optimization fuel exhausted");
        }

        let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(
            self.tcx,
            self.param_env,
            callee_body.clone(),
        );

        let old_blocks = caller_body.basic_blocks().next_index();
        self.inline_call(caller_body, &callsite, callee_body);
        let new_blocks = old_blocks..caller_body.basic_blocks().next_index();

        Ok(new_blocks)
    }

    fn check_mir_is_available(
        &self,
        caller_body: &Body<'tcx>,
        callee: &Instance<'tcx>,
    ) -> Result<(), &'static str> {
        let caller_def_id = caller_body.source.def_id();
        let callee_def_id = callee.def_id();
        if callee_def_id == caller_def_id {
            return Err("self-recursion");
        }

        match callee.def {
            InstanceDef::Item(_) => {
                // If there is no MIR available (either because it was not in metadata or
                // because it has no MIR because it's an extern function), then the inliner
                // won't cause cycles on this.
                if !self.tcx.is_mir_available(callee_def_id) {
                    return Err("item MIR unavailable");
                }
            }
            // These have no own callable MIR.
            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
                return Err("instance without MIR (intrinsic / virtual)");
            }
            // This cannot result in an immediate cycle since the callee MIR is a shim, which does
            // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
            // do not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            InstanceDef::VtableShim(_)
            | InstanceDef::ReifyShim(_)
            | InstanceDef::FnPtrShim(..)
            | InstanceDef::ClosureOnceShim { .. }
            | InstanceDef::DropGlue(..)
            | InstanceDef::CloneShim(..) => return Ok(()),
        }

        if self.tcx.is_constructor(callee_def_id) {
            trace!("constructors always have MIR");
            // Constructor functions cannot cause a query cycle.
            return Ok(());
        }

        if callee_def_id.is_local() {
            // Avoid a cycle here by using `instance_mir` only if we have a lower
            // `DefPathHash` than the callee. This ensures that the callee will
            // not inline us. This trick even works with incremental compilation,
            // since `DefPathHash` is stable.
            if self.tcx.def_path_hash(caller_def_id).local_hash()
                < self.tcx.def_path_hash(callee_def_id).local_hash()
            {
                return Ok(());
            }
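
            // To see why the ordering trick works, consider two local functions
            // `a` and `b` (hypothetical) that call each other: only the one with
            // the lower `DefPathHash` may pull in the other's MIR, so a cycle of
            // `instance_mir` queries between them can never form.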

            // If we know for sure that the function we're calling will itself try to
            // call us, then we avoid inlining that function.
            if self.tcx.mir_callgraph_reachable((*callee, caller_def_id.expect_local())) {
                return Err("caller might be reachable from callee (query cycle avoidance)");
            }

            Ok(())
        } else {
            // This cannot result in an immediate cycle since the callee MIR is from another crate
            // and is already optimized. Any subsequent inlining may cause cycles, but we do
            // not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            trace!("functions from other crates always have MIR");
            Ok(())
        }
    }

    fn resolve_callsite(
        &self,
        caller_body: &Body<'tcx>,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Only consider direct calls to functions.
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind {
            let func_ty = func.ty(caller_body, self.tcx);
            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
                // To resolve an instance, its substs have to be fully normalized.
                let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                let callee =
                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, substs);

                return Some(CallSite {
                    callee,
                    fn_sig,
                    block: bb,
                    target: destination.map(|(_, target)| target),
                    source_info: terminator.source_info,
                });
            }
        }

        None
    }

    /// Returns an error if inlining is not possible based on codegen attributes alone. A success
    /// indicates that the inlining decision should be based on other criteria.
    fn check_codegen_attributes(
        &self,
        callsite: &CallSite<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if let InlineAttr::Never = callee_attrs.inline {
            return Err("never inline hint");
        }

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.def_id().is_local() {
            let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
            if !is_generic && !callee_attrs.requests_inline() {
                return Err("not exported");
            }
        }

        if callsite.fn_sig.c_variadic() {
            return Err("C variadic");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
            return Err("naked");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            return Err("cold");
        }

        if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
            return Err("incompatible sanitizer set");
        }

        if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
            return Err("incompatible instruction set");
        }

        for feature in &callee_attrs.target_features {
            if !self.codegen_fn_attrs.target_features.contains(feature) {
                return Err("incompatible target feature");
            }
        }

        Ok(())
    }

    /// Returns the inlining decision based on examination of the callee MIR body.
    /// Assumes that codegen attributes have been checked for compatibility already.
    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx;

        let mut threshold = if callee_attrs.requests_inline() {
            self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            self.tcx.sess.opts.debugging_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks; we normally
        // have two or three blocks even for very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller.
        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }
            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Deinit(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }
            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref place, target, unwind }
                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the place doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
                    if ty.needs_drop(tcx, self.param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }

                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    if first_block =>
                {
                    // If the function always diverges, don't inline
                    // unless the cost is zero.
                    threshold = 0;
                }

                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, substs) =
                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
                    {
                        let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                        if let Ok(Some(instance)) =
                            Instance::resolve(self.tcx, self.param_env, def_id, substs)
                        {
                            if callsite.callee.def_id() == instance.def_id() {
                                return Err("self-recursion");
                            } else if self.history.contains(&instance) {
                                return Err("already inlined");
                            }
                        }
                        // Don't give intrinsics the extra penalty for calls.
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    } else {
                        cost += CALL_PENALTY;
                    }
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;

                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                TerminatorKind::InlineAsm { cleanup, .. } => {
                    cost += INSTR_COST;

                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }

        // Count up the cost of local variables and temps. If we know the size,
        // use that; otherwise we use a moderately-large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
            // Cost of the var is the size in machine-words, if we know
            // the size.
            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
                cost += ((size + ptr_size - 1) / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }
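
        // Worked example (assuming a 64-bit target): a 12-byte local costs
        // (12 + 8 - 1) / 8 = 2 machine-words, while a local whose layout cannot
        // be computed (e.g. one still mentioning generic parameters) costs
        // UNKNOWN_SIZE_COST = 10.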

        if let InlineAttr::Always = callee_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            Ok(())
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

    fn inline_call(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        mut callee_body: Body<'tcx>,
    ) {
        let terminator = caller_body[callsite.block].terminator.take().unwrap();
        match terminator.kind {
            TerminatorKind::Call { args, destination, cleanup, .. } => {
                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }
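
                // Sketch of the rewrite this guards (hypothetical MIR): for
                // `a[*i] = f(i)`, the destination `a[*i]` is not duplicated;
                // instead `tmp = &mut a[*i]` is emitted before the call and the
                // inlined body writes through `*tmp`.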

                let dest = if let Some((destination_place, _)) = destination {
                    if dest_needs_borrow(destination_place) {
                        trace!("creating temp for return destination");
                        let dest = Rvalue::Ref(
                            self.tcx.lifetimes.re_erased,
                            BorrowKind::Mut { allow_two_phase_borrow: false },
                            destination_place,
                        );
                        let dest_ty = dest.ty(caller_body, self.tcx);
                        let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::Assign(Box::new((temp, dest))),
                        });
                        self.tcx.mk_place_deref(temp)
                    } else {
                        destination_place
                    }
                } else {
                    trace!("creating temp for return place");
                    Place::from(self.new_call_temp(caller_body, &callsite, callee_body.return_ty()))
                };

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);

                let mut expn_data = ExpnData::default(
                    ExpnKind::Inlined,
                    callsite.source_info.span,
                    self.tcx.sess.edition(),
                    None,
                    None,
                );
                expn_data.def_site = callee_body.span;
                let expn_data =
                    LocalExpnId::fresh(expn_data, self.tcx.create_stable_hashing_context());
                let mut integrator = Integrator {
                    args: &args,
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
                    destination: dest,
                    return_block: callsite.target,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                    expn_data,
                    always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
                };

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);

                for scope in &mut callee_body.source_scopes {
                    // FIXME(eddyb) move this into a `fn visit_scope_data` in `Integrator`.
                    if scope.parent_scope.is_none() {
                        let callsite_scope = &caller_body.source_scopes[callsite.source_info.scope];

                        // Attach the outermost callee scope as a child of the callsite
                        // scope, via the `parent_scope` and `inlined_parent_scope` chains.
                        scope.parent_scope = Some(callsite.source_info.scope);
                        assert_eq!(scope.inlined_parent_scope, None);
                        scope.inlined_parent_scope = if callsite_scope.inlined.is_some() {
                            Some(callsite.source_info.scope)
                        } else {
                            callsite_scope.inlined_parent_scope
                        };

                        // Mark the outermost callee scope as an inlined one.
                        assert_eq!(scope.inlined, None);
                        scope.inlined = Some((callsite.callee, callsite.source_info.span));
                    } else if scope.inlined_parent_scope.is_none() {
                        // Make it easy to find the scope with `inlined` set above.
                        scope.inlined_parent_scope =
                            Some(integrator.map_scope(OUTERMOST_SOURCE_SCOPE));
                    }
                }

                // If there are any locals without storage markers, give them storage only for the
                // duration of the call.
                for local in callee_body.vars_and_temps_iter() {
                    if integrator.always_live_locals.contains(local) {
                        let new_local = integrator.map_local(local);
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::StorageLive(new_local),
                        });
                    }
                }
                if let Some(block) = callsite.target {
                    // To avoid repeated O(n) insert, push any new statements to the end and rotate
                    // the slice once.
                    let mut n = 0;
                    for local in callee_body.vars_and_temps_iter().rev() {
                        if integrator.always_live_locals.contains(local) {
                            let new_local = integrator.map_local(local);
                            caller_body[block].statements.push(Statement {
                                source_info: callsite.source_info,
                                kind: StatementKind::StorageDead(new_local),
                            });
                            n += 1;
                        }
                    }
                    caller_body[block].statements.rotate_right(n);
                }

                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
                caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.block].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });

                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although we are only pushing `ConstKind::Unevaluated` consts to
                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
                // because we are calling `subst_and_normalize_erasing_regions`.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&ct| {
                        match ct.literal.const_for_ty() {
                            Some(ct) => matches!(ct.val(), ConstKind::Unevaluated(_)),
                            None => true,
                        }
                    }),
                );
            }
            kind => bug!("unexpected terminator kind {:?}", kind),
        }
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        callee_body: &Body<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
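        //
        // Illustrative source-level view (hypothetical closure `f`): a call
        // `f(a, b, c)` reaches this pass as `Fn::call(&f, (a, b, c))` and is
        // rewritten here into `&f` plus one spilled temporary per tuple field.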
        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
    ) -> Local {
        // Reuse the operand if it is a moved temporary.
        if let Operand::Move(place) = &arg
            && let Some(local) = place.as_local()
            && caller_body.local_kind(local) == LocalKind::Temp
        {
            return local;
        }

        // Otherwise, create a temporary for the argument.
        trace!("creating temp for argument {:?}", arg);
        let arg_ty = arg.ty(caller_body, self.tcx);
        let local = self.new_call_temp(caller_body, callsite, arg_ty);
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
        });
        local
    }

    /// Introduces a new temporary into the caller body that is live for the duration of the call.
    fn new_call_temp(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        ty: Ty<'tcx>,
    ) -> Local {
        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::StorageLive(local),
        });

        if let Some(block) = callsite.target {
            caller_body[block].statements.insert(
                0,
                Statement {
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(local),
                },
            );
        }

        local
    }
}

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/**
 * Integrates blocks from the callee function into the calling function.
 *
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Place<'tcx>,
    return_block: Option<BasicBlock>,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
    expn_data: LocalExpnId,
    always_live_locals: BitSet<Local>,
}

impl Integrator<'_, '_> {
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination.local
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }
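
    // Illustrative mapping (hypothetical indices): with `destination.local == _5`,
    // args `[_6, _7]`, and `new_locals` starting at `_8`, the callee's `_0` maps
    // to `_5`, `_1` and `_2` map to `_6` and `_7`, and `_3` maps to `_8`.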

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }
}

impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_span(&mut self, span: &mut Span) {
        // Make sure that all spans track the fact that they were inlined.
        *span = span.fresh_expansion(self.expn_data);
    }

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        for elem in place.projection {
            // FIXME: Make sure that return place is not used in an indexing projection, since it
            // won't be rebased as it is supposed to be.
            assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
        }

        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base
        // or projections.
        self.super_place(place, context, location)
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.return_block {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                }
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the `optimized_mir` query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);
                } else if !self.in_cleanup_block {
                    // Unless this inline asm is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
        }
    }
}