//! Inlining pass for MIR functions

use rustc_attr as attr;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_span::{hygiene::ExpnKind, ExpnData, Span};
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::MirPass;

use std::iter;
use std::ops::{Range, RangeFrom};

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
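// These weights drive the cost model in `should_inline` below: ordinary
// statements and terminators add `INSTR_COST`; calls, asserts, and drops of
// types that actually need dropping add `CALL_PENALTY`; unwind edges add
// `LANDINGPAD_PENALTY`; a `Resume` terminator adds `RESUME_PENALTY`; and
// locals whose size is unknown add `UNKNOWN_SIZE_COST` each. The resulting
// total is compared against the per-callsite inlining threshold.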
#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    target: Option<BasicBlock>,
    source_info: SourceInfo,
}
impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level < 2 {
            return;
        }

        if inline(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            CfgSimplifier::new(body).simplify();
            remove_dead_blocks(body);
fn inline(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id();
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());

    // Only do inlining into fn bodies.
    if !tcx.hir().body_owner_kind(hir_id).is_fn_or_closure() {
        return false;
    }
    if body.source.promoted.is_some() {
        return false;
    }

    let mut this = Inliner {
        param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()),
        codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()),

    let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
    this.process_blocks(body, blocks);
struct Inliner<'tcx> {
    param_env: ParamEnv<'tcx>,
    /// Caller codegen attributes.
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
    /// Stack of inlined instances.
    history: Vec<Instance<'tcx>>,
    /// Indicates that the caller body has been modified.
    changed: bool,
    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
        for bb in blocks {
            let callsite = match self.get_valid_function_call(bb, &caller_body[bb], caller_body) {
                Some(it) => it,
                None => continue,
            };

            if !self.is_mir_available(&callsite.callee, caller_body) {
                debug!("MIR unavailable {}", callsite.callee);
                continue;
            }

            let callee_body = self.tcx.instance_mir(callsite.callee.def);
            if !self.should_inline(callsite, callee_body) {
                continue;
            }

            if !self.tcx.consider_optimizing(|| {
                format!("Inline {:?} into {}", callee_body.span, callsite.callee)
            }) {
                return;
            }

            let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(

            let old_blocks = caller_body.basic_blocks().next_index();
            self.inline_call(callsite, caller_body, callee_body);
            let new_blocks = old_blocks..caller_body.basic_blocks().next_index();

            self.history.push(callsite.callee);
            self.process_blocks(caller_body, new_blocks);
            self.history.pop();
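            // Note that only the freshly inlined blocks are re-scanned by the
            // recursive call, and `should_inline` consults `history` to reject
            // (mutually) recursive callees, so this recursion terminates.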
    fn is_mir_available(&self, callee: &Instance<'tcx>, caller_body: &Body<'tcx>) -> bool {
        if let InstanceDef::Item(_) = callee.def {
            if !self.tcx.is_mir_available(callee.def_id()) {
                return false;
            }
        }

        if let Some(callee_def_id) = callee.def_id().as_local() {
            let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
            // Avoid a cycle here by using `instance_mir` only if we have
            // a lower `HirId` than the callee. This ensures that the callee will
            // not inline us. This trick only works without incremental compilation.
            // So don't do it if that is enabled. Also avoid inlining into generators,
            // since their `optimized_mir` is used for layout computation, which can
            // create a cycle, even when no attempt is made to inline the function
            // in the other direction.
            !self.tcx.dep_graph.is_fully_enabled()
                && self.hir_id < callee_hir_id
                && caller_body.generator_kind.is_none()
        } else {
            // This cannot result in a cycle since the callee MIR is from another crate
            // and is already optimized.
            true
        }
    }
    fn get_valid_function_call(
        &self,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
        caller_body: &Body<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Don't inline calls that are in cleanup blocks.
        if bb_data.is_cleanup {
            return None;
        }

        // Only consider direct calls to functions.
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind {
            let func_ty = func.ty(caller_body, self.tcx);
            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
                // To resolve an instance, its substs have to be fully normalized.
                let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                let callee =
                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);

                return Some(CallSite {
                    callee,
                    fn_sig,
                    block: bb,
                    target: destination.map(|(_, target)| target),
                    source_info: terminator.source_info,
                });
            }
        }

        None
    }
    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        if callsite.fn_sig.c_variadic() {
            debug!("callee is variadic - not inlining");
            return false;
        }

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());

        let self_features = &self.codegen_fn_attrs.target_features;
        let callee_features = &codegen_fn_attrs.target_features;
        if callee_features.iter().any(|feature| !self_features.contains(feature)) {
            debug!("callee has extra target features - not inlining");
            return false;
        }

        if self.codegen_fn_attrs.no_sanitize != codegen_fn_attrs.no_sanitize {
            debug!("callee has incompatible no_sanitize attribute - not inlining");
            return false;
        }
        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now;
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("`#[inline(never)]` present - not inlining");
                return false;
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.def_id().is_local() {
            if callsite.callee.substs.non_erasable_generics().count() == 0 && !hinted {
                debug!(" callee is an exported function - not inlining");
                return false;
            }
        }
        let mut threshold = if hinted {
            self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold
        } else {
            self.tcx.sess.opts.debugging_opts.inline_mir_threshold
        };

        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
            debug!("#[naked] present - not inlining");
            return false;
        }

        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            debug!("#[cold] present - not inlining");
            return false;
        }

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!(" final inline threshold = {}", threshold);
        // FIXME: Give a bonus to functions with only a single caller.
        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }

            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }

            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref place, target, unwind }
                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);

                    // If the place doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
                    if ty.needs_drop(tcx, self.param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }

                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    if first_block =>
                {
                    // If the function always diverges, don't inline
                    // unless the cost is zero.
                    threshold = 0;
                }
                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, substs) =
                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty).kind()
                    {
                        let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                        if let Ok(Some(instance)) =
                            Instance::resolve(self.tcx, self.param_env, def_id, substs)
                        {
                            if callsite.callee == instance || self.history.contains(&instance) {
                                debug!("callee is recursive - not inlining");
                                return false;
                            }
                        }

                        // Don't give intrinsics the extra penalty for calls.
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    } else {
                        cost += CALL_PENALTY;
                    }
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }
        }
        // Count up the cost of local variables and temps. If we know the size,
        // use that; otherwise we use a moderately-large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
            // Cost of the var is the size in machine-words, if we know
            // the size.
            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
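            // E.g. on a 64-bit target, a `[u64; 4]` local adds 32 / 8 = 4 to the
            // cost, while a local whose layout cannot be computed adds the flat
            // `UNKNOWN_SIZE_COST`.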
        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            true
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            true
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            false
        }
    fn inline_call(
        &self,
        callsite: CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        mut callee_body: Body<'tcx>,
    ) {
        let terminator = caller_body[callsite.block].terminator.take().unwrap();
        match terminator.kind {
            TerminatorKind::Call { args, destination, cleanup, .. } => {
                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,

                let dest = if let Some((destination_place, _)) = destination {
                    if dest_needs_borrow(destination_place) {
                        trace!("creating temp for return destination");
                        let dest = Rvalue::Ref(
                            self.tcx.lifetimes.re_erased,
                            BorrowKind::Mut { allow_two_phase_borrow: false },
                            destination_place,
                        );
                        let dest_ty = dest.ty(caller_body, self.tcx);
                        let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::Assign(box (temp, dest)),
                        });
                        self.tcx.mk_place_deref(temp)
                    } else {
                        destination_place
                    }
                } else {
                    trace!("creating temp for return place");
                    Place::from(self.new_call_temp(caller_body, &callsite, callee_body.return_ty()))
                };

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);
                let mut integrator = Integrator {
                    args: &args,
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
                    destination: dest,
                    return_block: callsite.target,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                    callsite_span: callsite.source_info.span,
                    body_span: callee_body.span,
                    always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
                };

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);
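                // After this visit, callee indices are offset past the caller's
                // existing ones: e.g. if the caller already had 10 basic blocks,
                // callee `bb0` becomes `bb10`, and likewise for locals and source
                // scopes (modulo the special cases handled by `map_local`).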
                for scope in &mut callee_body.source_scopes {
                    // FIXME(eddyb) move this into a `fn visit_scope_data` in `Integrator`.
                    if scope.parent_scope.is_none() {
                        let callsite_scope = &caller_body.source_scopes[callsite.source_info.scope];

                        // Attach the outermost callee scope as a child of the callsite
                        // scope, via the `parent_scope` and `inlined_parent_scope` chains.
                        scope.parent_scope = Some(callsite.source_info.scope);
                        assert_eq!(scope.inlined_parent_scope, None);
                        scope.inlined_parent_scope = if callsite_scope.inlined.is_some() {
                            Some(callsite.source_info.scope)
                        } else {
                            callsite_scope.inlined_parent_scope
                        };

                        // Mark the outermost callee scope as an inlined one.
                        assert_eq!(scope.inlined, None);
                        scope.inlined = Some((callsite.callee, callsite.source_info.span));
                    } else if scope.inlined_parent_scope.is_none() {
                        // Make it easy to find the scope with `inlined` set above.
                        scope.inlined_parent_scope =
                            Some(integrator.map_scope(OUTERMOST_SOURCE_SCOPE));
                    }
                }
                // If there are any locals without storage markers, give them storage only for the
                // duration of the call.
                for local in callee_body.vars_and_temps_iter() {
                    if integrator.always_live_locals.contains(local) {
                        let new_local = integrator.map_local(local);
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::StorageLive(new_local),
                        });
                    }
                }

                if let Some(block) = callsite.target {
                    // To avoid repeated O(n) insert, push any new statements to the end and rotate
                    // the slice once.
                    let mut n = 0;
                    for local in callee_body.vars_and_temps_iter().rev() {
                        if integrator.always_live_locals.contains(local) {
                            let new_local = integrator.map_local(local);
                            caller_body[block].statements.push(Statement {
                                source_info: callsite.source_info,
                                kind: StatementKind::StorageDead(new_local),
                            });
                            n += 1;
                        }
                    }
                    caller_body[block].statements.rotate_right(n);
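                    // The rotation moves the `n` `StorageDead` statements just
                    // pushed to the front of the block, ahead of its original
                    // statements, without repeated O(n) inserts at index 0.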
                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(
                    // FIXME(eddyb) make `Range<Local>` iterable so that we can use
                    // `callee_body.local_decls.drain(callee_body.vars_and_temps())`
                    callee_body
                        .vars_and_temps_iter()
                        .map(|local| callee_body.local_decls[local].clone()),
                );
                caller_body.source_scopes.extend(callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.extend(callee_body.var_debug_info.drain(..));
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.block].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });
                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although we are only pushing `ConstKind::Unevaluated` consts to
                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
                // because we are calling `subst_mir_and_normalize_erasing_regions`.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&constant| {
                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
                    }),
                );
            }
            kind => bug!("unexpected terminator kind {:?}", kind),
    fn make_call_args(
        &mut self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        callee_body: &Body<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
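        //
        // As a concrete illustration (a hypothetical example, not taken from
        // this file), a closure call like
        //
        //     let f = |a: i32, b: i32| a + b;
        //     f(1, 2);
        //
        // reaches this point as roughly `call(f, (1, 2))` under the "rust-call"
        // ABI, which is exactly the two-operand shape unpacked below.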
        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind() {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field =
                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &mut self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
    ) -> Local {
        // Reuse the operand if it is a moved temporary.
        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    return local;
                }
            }
        }

        // Otherwise, create a temporary for the argument.
        trace!("creating temp for argument {:?}", arg);
        let arg_ty = arg.ty(caller_body, self.tcx);
        let local = self.new_call_temp(caller_body, callsite, arg_ty);
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(box (Place::from(local), Rvalue::Use(arg))),
        });
        local
    }
    /// Introduces a new temporary into the caller body that is live for the duration of the call.
    fn new_call_temp(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        ty: Ty<'tcx>,
    ) -> Local {
        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::StorageLive(local),

        if let Some(block) = callsite.target {
            caller_body[block].statements.insert(
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(local),

        local
    }

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}
/**
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Place<'tcx>,
    return_block: Option<BasicBlock>,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
    callsite_span: Span,
    body_span: Span,
    always_live_locals: BitSet<Local>,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
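    /// Maps a callee local into the caller body: `_0` (the return place) becomes
    /// the local of the call's destination, argument locals map into `self.args`,
    /// and all remaining locals are offset into the locals freshly appended to the
    /// caller. E.g. with two arguments and `new_locals` starting at `_10`, callee
    /// `_3` (its first non-argument local) maps to caller `_10`.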
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination.local
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }
impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_span(&mut self, span: &mut Span) {
        let mut expn_data =
            ExpnData::default(ExpnKind::Inlined, *span, self.tcx.sess.edition(), None);
        expn_data.def_site = self.body_span;
        // Make sure that all spans track the fact that they were inlined.
        *span = self.callsite_span.fresh_expansion(expn_data);
    }
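    // The fresh expansion above tags every inlined span with `ExpnKind::Inlined`
    // and records the callee body's span as the definition site, so later
    // consumers can tell the code was inlined rather than written at the call
    // site.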
    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        for elem in place.projection {
            // FIXME: Make sure that return place is not used in an indexing projection, since it
            // won't be rebased as it is supposed to be.
            assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
        }

        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
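            // E.g. if the call writes its result into `(*_2)` and the callee
            // mentions `_0.1`, the rebased place becomes `(*_2).1`: destination
            // projections first, then the callee's own.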
        }
        // Handles integrating any locals that occur in the base.
        self.super_place(place, context, location)
    }
    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }

        self.super_statement(statement, location);
    }
    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.return_block {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);