//! Inlining pass for MIR functions

use rustc_attr as attr;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_span::{hygiene::ExpnKind, ExpnData, Span};
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::MirPass;
use std::collections::VecDeque;
use std::iter;
use std::ops::RangeFrom;

const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
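
// Rough illustration of how these knobs interact (not an exact rule): a callee made of
// two plain statements and one non-intrinsic call is costed at roughly
// 2 * INSTR_COST + CALL_PENALTY = 35, which fits under DEFAULT_THRESHOLD; add an unwind
// edge (+ LANDINGPAD_PENALTY) and it only stays eligible when the callee is
// `#[inline]`-hinted and therefore judged against HINT_THRESHOLD instead.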

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    bb: BasicBlock,
    source_info: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            if tcx.sess.opts.debugging_opts.instrument_coverage {
                // The current implementation of source code coverage injects code region counters
                // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code-
                debug!("function inlining is disabled when compiling with `instrument_coverage`");
                param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()),
                codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()),

struct Inliner<'tcx> {
    param_env: ParamEnv<'tcx>,
    codegen_fn_attrs: &'tcx CodegenFnAttrs,

    fn run_pass(&self, caller_body: &mut Body<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try and fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // they do not call us. Otherwise, we just don't try to

        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this

        let mut callsites = VecDeque::new();

        let def_id = caller_body.source.def_id();

        // Only do inlining into fn bodies.
        let self_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
        if self.tcx.hir().body_owner_kind(self_hir_id).is_fn_or_closure()
            && caller_body.source.promoted.is_none()
        {
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
                if let Some(callsite) = self.get_valid_function_call(bb, bb_data, caller_body) {
                    callsites.push_back(callsite);

        let mut changed = false;
        while let Some(callsite) = callsites.pop_front() {
            debug!("checking whether to inline callsite {:?}", callsite);

            if let InstanceDef::Item(_) = callsite.callee.def {
                if !self.tcx.is_mir_available(callsite.callee.def_id()) {
                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);

            let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() {
                let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
                // Avoid a cycle here by only using `instance_mir` if we have
                // a lower `HirId` than the callee. This ensures that the callee will
                // not inline us. This trick only works without incremental compilation.
                // So don't do it if that is enabled. Also avoid inlining into generators,
                // since their `optimized_mir` is used for layout computation, which can
                // create a cycle, even when no attempt is made to inline the function
                // in the other direction.
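                // For instance (illustrative only): if `f` has a lower `HirId` than `g` and
                // `f` calls `g`, `f` may request `g`'s optimized MIR here, but `g` will never
                // do the same for `f`, so the query graph stays acyclic.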
                if !self.tcx.dep_graph.is_fully_enabled()
                    && self_hir_id < callee_hir_id
                    && caller_body.generator_kind.is_none()
                {
                    self.tcx.instance_mir(callsite.callee.def)
                // This cannot result in a cycle since the callee MIR is from another crate
                // and is already optimized.
                self.tcx.instance_mir(callsite.callee.def)

            let callee_body: &Body<'tcx> = &*callee_body;

            let callee_body = if self.consider_optimizing(callsite, callee_body) {
                self.tcx.subst_and_normalize_erasing_regions(
                    &callsite.callee.substs,

            // Copy only unevaluated constants from the callee_body into the caller_body.
            // Although we are only pushing `ConstKind::Unevaluated` consts to
            // `required_consts`, here we may not only have `ConstKind::Unevaluated`
            // because we are calling `subst_and_normalize_erasing_regions`.
            caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter(
                |&constant| matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _)),
            ));

            let start = caller_body.basic_blocks().len();
152 debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
153 if !self.inline_call(callsite, caller_body, callee_body) {
154 debug!("attempting to inline callsite {:?} - failure", callsite);
157 debug!("attempting to inline callsite {:?} - success", callsite);
159 // Add callsites from inlined function
160 for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
161 if let Some(new_callsite) = self.get_valid_function_call(bb, bb_data, caller_body) {
162 // Don't inline the same function multiple times.
163 if callsite.callee != new_callsite.callee {
164 callsites.push_back(new_callsite);
172 // Simplify if we inlined anything.
174 debug!("running simplify cfg on {:?}", caller_body.source);
175 CfgSimplifier::new(caller_body).simplify();
176 remove_dead_blocks(caller_body);

    fn get_valid_function_call(
        &self,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
        caller_body: &Body<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Don't inline calls that are in cleanup blocks.
        if bb_data.is_cleanup {
            return None;
        }

        // Only consider direct calls to functions
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
            if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() {
                // To resolve an instance, its substs have to be fully normalized, so
                let normalized_substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                let callee =
                    Instance::resolve(self.tcx, self.param_env, callee_def_id, normalized_substs)

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                return Some(CallSite { callee, bb, source_info: terminator.source_info });

    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("consider_optimizing({:?})", callsite);
        self.should_inline(callsite, callee_body)
            && self.tcx.consider_optimizing(|| {
                format!("Inline {:?} into {:?}", callee_body.span, callsite)

    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        // Cannot inline generators which haven't been transformed yet
        if callee_body.yield_ty.is_some() {
            debug!(" yield ty present - not inlining");

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id());

        let self_features = &self.codegen_fn_attrs.target_features;
        let callee_features = &codegen_fn_attrs.target_features;
        if callee_features.iter().any(|feature| !self_features.contains(feature)) {
            debug!("callee has extra target features - not inlining");

        let self_no_sanitize =
            self.codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
        let callee_no_sanitize =
            codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
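        // E.g. (illustrative): when building with `-Zsanitizer=address`, a caller marked
        // `#[no_sanitize(address)]` must not absorb the body of a callee that is still
        // instrumented (or the other way around), so the masked `no_sanitize` sets have to
        // match exactly.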
        if self_no_sanitize != callee_no_sanitize {
            debug!("callee has incompatible no_sanitize attribute - not inlining");

        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now; there are cases
            // that prevent inlining that we need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("`#[inline(never)]` present - not inlining");
                return false;
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols
        if callsite.callee.def_id().is_local() {
            if callsite.callee.substs.non_erasable_generics().count() == 0 && !hinted {
                debug!(" callee is an exported function - not inlining");

        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };

        // Significantly lower the threshold for inlining cold functions
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!(" final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller
        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }
            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }

            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref place, target, unwind }
                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);

                    // If the place doesn't actually need dropping, treat it like
                    let ty = place.ty(callee_body, tcx).subst(tcx, callsite.callee.substs).ty;
                    if ty.needs_drop(tcx, self.param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);
                        }
                    }
                }
                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }

                    // If the function always diverges, don't inline
                    // unless the cost is zero

                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, _) = *f.literal.ty.kind() {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    } else {
                        cost += CALL_PENALTY;
                    }
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }
        }

        // Count up the cost of local variables and temps; if we know the size,
        // use that, otherwise use a moderately large dummy cost.
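        // E.g. (illustrative): with 8-byte pointers, a `[u8; 64]` temporary adds 64 / 8 = 8
        // to the cost, while a local whose layout cannot be computed here (say, one that
        // still mentions an unsubstituted type parameter) adds UNKNOWN_SIZE_COST.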

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let v = &callee_body.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.callee.substs);
            // Cost of the var is the size in machine-words, if we know
            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }

        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);

        if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);

            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);

    fn inline_call(
        &self,
        callsite: CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        mut callee_body: Body<'tcx>,
    ) -> bool {
        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("inlined {:?} into {:?}", callsite.callee, caller_body.source);

                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
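                // Roughly, instead of writing the call's result straight into `a[*i]` we emit
                // (sketch, not literal MIR):
                //     tmp = &mut a[*i];
                //     call f(i) -> *tmp
                // so the indexing is evaluated once, before `f` has a chance to modify `*i`.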
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,

                let dest = if dest_needs_borrow(destination.0) {
                    debug!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination.0,
                    );
                    let ty = dest.ty(caller_body, self.tcx);

                    let temp = LocalDecl::new(ty, callsite.source_info.span);

                    let tmp = caller_body.local_decls.push(temp);
                    let tmp = Place::from(tmp);

                    let stmt = Statement {
                        source_info: callsite.source_info,
                        kind: StatementKind::Assign(box (tmp, dest)),
                    };
                    caller_body[callsite.bb].statements.push(stmt);
                    self.tcx.mk_place_deref(tmp)
                } else {
                    destination.0
                };

                let return_block = destination.1;

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, return_block);

                let mut integrator = Integrator {
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    callsite_span: callsite.source_info.span,
                    body_span: callee_body.span,

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);

                for scope in &mut callee_body.source_scopes {
                    // FIXME(eddyb) move this into a `fn visit_scope_data` in `Integrator`.
                    if scope.parent_scope.is_none() {
                        let callsite_scope = &caller_body.source_scopes[callsite.source_info.scope];

                        // Attach the outermost callee scope as a child of the callsite
                        // scope, via the `parent_scope` and `inlined_parent_scope` chains.
                        scope.parent_scope = Some(callsite.source_info.scope);
                        assert_eq!(scope.inlined_parent_scope, None);
                        scope.inlined_parent_scope = if callsite_scope.inlined.is_some() {
                            Some(callsite.source_info.scope)
                        } else {
                            callsite_scope.inlined_parent_scope
                        };

                        // Mark the outermost callee scope as an inlined one.
                        assert_eq!(scope.inlined, None);
                        scope.inlined = Some((callsite.callee, callsite.source_info.span));
                    } else if scope.inlined_parent_scope.is_none() {
                        // Make it easy to find the scope with `inlined` set above.
                        scope.inlined_parent_scope =
                            Some(integrator.map_scope(OUTERMOST_SOURCE_SCOPE));

                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(
                    // FIXME(eddyb) make `Range<Local>` iterable so that we can use
                    // `callee_body.local_decls.drain(callee_body.vars_and_temps())`
                    callee_body
                        .vars_and_temps_iter()
                        .map(|local| callee_body.local_decls[local].clone()),
                );
                caller_body.source_scopes.extend(callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.extend(callee_body.var_debug_info.drain(..));
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.bb].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });

                true
            }
            kind => {
                caller_body[callsite.bb].terminator =
                    Some(Terminator { source_info: terminator.source_info, kind });
                false

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        return_block: BasicBlock,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        // FIXME(eddyb) make this check for `"rust-call"` ABI combined with
        // `callee_body.spread_arg == None`, instead of special-casing closures.
        if tcx.is_closure(callsite.callee.def_id()) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(
                args.next().unwrap(),
            let tuple = self.create_temp_if_necessary(
                args.next().unwrap(),
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind() {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field =
                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block))

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
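    ///
    /// For example (illustrative): `Operand::Move(_3)` where `_3` is already a caller
    /// temporary is reused directly, while any other operand gets a fresh local plus a
    /// `StorageLive` and an assignment before the call, and a `StorageDead` at the start
    /// of the return block.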
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        return_block: BasicBlock,
    ) -> Local {
        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.

        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    // Reuse the operand if it's a temporary already
                    return local;
                }
            }
        }
634 debug!("creating temp for argument {:?}", arg);
635 // Otherwise, create a temporary for the arg
636 let arg = Rvalue::Use(arg);
638 let ty = arg.ty(caller_body, self.tcx);
640 let arg_tmp = LocalDecl::new(ty, callsite.source_info.span);
641 let arg_tmp = caller_body.local_decls.push(arg_tmp);
643 caller_body[callsite.bb].statements.push(Statement {
644 source_info: callsite.source_info,
645 kind: StatementKind::StorageLive(arg_tmp),
647 caller_body[callsite.bb].statements.push(Statement {
648 source_info: callsite.source_info,
649 kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
651 caller_body[return_block].statements.insert(
654 source_info: callsite.source_info,
655 kind: StatementKind::StorageDead(arg_tmp),

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/**
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Place<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    callsite_span: Span,
    body_span: Span,
    tcx: TyCtxt<'tcx>,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination.local
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
            }
        };
        debug!("mapping local `{:?}` to `{:?}`", local, new);

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        debug!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        debug!("mapping block `{:?}` to `{:?}`", block, new);
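        // E.g. (illustrative): if the caller had 5 basic blocks before inlining, the
        // callee's bb0 becomes bb5, bb1 becomes bb6, and so on; locals and scopes are
        // renumbered with the same offset scheme.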

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_span(&mut self, span: &mut Span) {
        // Make sure that all spans track the fact that they were inlined.
        *span = self.callsite_span.fresh_expansion(ExpnData {
            def_site: self.body_span,
            ..ExpnData::default(ExpnKind::Inlined, *span, self.tcx.sess.edition(), None)

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base
        self.super_place(place, context, location)
    }
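    // E.g. (illustrative): if the call's destination is `(*_1).0` and the callee writes to
    // `_0.1`, the rebased place becomes `(*_1).0.1` once the projections are concatenated.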

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                terminator.kind = TerminatorKind::Goto { target: self.return_block };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);