//! Inlining pass for MIR functions

use rustc_attr as attr;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::{Subst, SubstsRef};
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_session::config::Sanitizer;
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::{MirPass, MirSource};
use std::collections::VecDeque;

const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
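
// A rough guide to how these constants are used (summary comment added for clarity):
// `should_inline` walks the callee, charging INSTR_COST per ordinary statement,
// CALL_PENALTY per call/assert/meaningful drop, LANDINGPAD_PENALTY per unwind edge,
// RESUME_PENALTY for a resume, plus the size of each local in machine words (or
// UNKNOWN_SIZE_COST when the layout is unknown), and compares the total against
// DEFAULT_THRESHOLD or HINT_THRESHOLD.
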
#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    substs: SubstsRef<'tcx>,

impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            Inliner { tcx, source }.run_pass(body);

struct Inliner<'tcx> {
    source: MirSource<'tcx>,

    fn run_pass(&self, caller_body: &mut Body<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try to fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // it does not call us. Otherwise, we just don't try to inline.
        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this pass.
        let mut callsites = VecDeque::new();

        let param_env = self.tcx.param_env(self.source.def_id()).with_reveal_all();

        // Only do inlining into fn bodies.
        let id = self.tcx.hir().as_local_hir_id(self.source.def_id().expect_local());
        if self.tcx.hir().body_owner_kind(id).is_fn_or_closure() && self.source.promoted.is_none() {
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
                if let Some(callsite) =
                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                    callsites.push_back(callsite);

        let mut changed = false;

            while let Some(callsite) = callsites.pop_front() {
                debug!("checking whether to inline callsite {:?}", callsite);
                if !self.tcx.is_mir_available(callsite.callee) {
                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);

                let callee_body = if let Some(callee_def_id) = callsite.callee.as_local() {
                    let callee_hir_id = self.tcx.hir().as_local_hir_id(callee_def_id);
                        self.tcx.hir().as_local_hir_id(self.source.def_id().expect_local());
                    // Avoid a cycle here by using `optimized_mir` only if we have
                    // a lower `HirId` than the callee. This ensures that the callee will
                    // not inline us. This trick only works without incremental compilation,
                    // so don't do it if that is enabled.
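                    // For example (illustration added here, not in the original comment): if two
                    // local functions `f` and `g` (hypothetical names) call each other, only the
                    // one with the lower `HirId` will request the other's `optimized_mir`, so the
                    // queries cannot recurse into each other and form a cycle.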
                    if !self.tcx.dep_graph.is_fully_enabled() && self_hir_id < callee_hir_id {
                        self.tcx.optimized_mir(callsite.callee)

                    // This cannot result in a cycle since the callee MIR is from another crate
                    // and is already optimized.
                    self.tcx.optimized_mir(callsite.callee)

                let callee_body = if self.consider_optimizing(callsite, callee_body) {
                    self.tcx.subst_and_normalize_erasing_regions(
                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although only `ConstKind::Unevaluated` consts are ever pushed to
                // `required_consts`, after `subst_and_normalize_erasing_regions` some of
                // them may no longer be `ConstKind::Unevaluated`, so we filter again here.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&constant| {
                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))

                let start = caller_body.basic_blocks().len();
                debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
                if !self.inline_call(callsite, caller_body, callee_body) {
                    debug!("attempting to inline callsite {:?} - failure", callsite);
                debug!("attempting to inline callsite {:?} - success", callsite);

                // Add callsites from inlined function
                for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
                    if let Some(new_callsite) =
                        self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                        // Don't inline the same function multiple times.
                        if callsite.callee != new_callsite.callee {
                            callsites.push_back(new_callsite);

        // Simplify if we inlined anything.
            debug!("running simplify cfg on {:?}", self.source);
            CfgSimplifier::new(caller_body).simplify();
            remove_dead_blocks(caller_body);

    fn get_valid_function_call(
        bb_data: &BasicBlockData<'tcx>,
        caller_body: &Body<'tcx>,
        param_env: ParamEnv<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Don't inline calls that are in cleanup blocks.
        if bb_data.is_cleanup {

        // Only consider direct calls to functions
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
            if let ty::FnDef(callee_def_id, substs) = op.ty(caller_body, self.tcx).kind {
                    Instance::resolve(self.tcx, param_env, callee_def_id, substs).ok().flatten()?;
                if let InstanceDef::Virtual(..) = instance.def {
                return Some(CallSite {
                    callee: instance.def_id(),
                    substs: instance.substs,
                    location: terminator.source_info,

    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("consider_optimizing({:?})", callsite);
        self.should_inline(callsite, callee_body)
            && self.tcx.consider_optimizing(|| {
                format!("Inline {:?} into {:?}", callee_body.span, callsite)

    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("should_inline({:?})", callsite);

        // Cannot inline generators which haven't been transformed yet
        if callee_body.yield_ty.is_some() {
            debug!(" yield ty present - not inlining");

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);

        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) {
            debug!("`#[track_caller]` present - not inlining");
        // Avoid inlining functions marked as no_sanitize if a sanitizer is enabled,
        // since instrumentation might be enabled and performed on the caller.
        match self.tcx.sess.opts.debugging_opts.sanitizer {
            Some(Sanitizer::Address) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_ADDRESS) {
            Some(Sanitizer::Memory) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_MEMORY) {
            Some(Sanitizer::Thread) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_THREAD) {
            Some(Sanitizer::Leak) => {}

        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now;
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("`#[inline(never)]` present - not inlining");
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
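        // Put differently (a reading of the check below, not part of the original
        // comment): a local callee is only considered when it is generic or carries an
        // `#[inline]` hint, i.e. when its MIR would be exported for cross-crate
        // inlining anyway.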
        if callsite.callee.is_local() {
            if callsite.substs.non_erasable_generics().count() == 0 && !hinted {
                debug!(" callee is an exported function - not inlining");

        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };

        // Significantly lower the threshold for inlining cold functions
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        debug!(" final inline threshold = {}", threshold);
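
        // Worked example of the arithmetic above (added for illustration, not in the
        // original source): an unannotated callee with at most three basic blocks ends
        // up with a threshold of 50 + 50 / 4 = 62, while an `#[inline]`-hinted one
        // starts from 100 instead of 50.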

        // FIXME: Give a bonus to functions with only a single caller

        let param_env = tcx.param_env(self.source.def_id());

        let mut first_block = true;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,

            let term = blk.terminator();
            let mut is_drop = false;
                TerminatorKind::Drop { ref location, target, unwind }
                | TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
                    work_list.push(target);
                    // If the location doesn't actually need dropping, treat it like
                    let ty = location.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
                    if ty.needs_drop(tcx, param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);

                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    // If the function always diverges, don't inline
                    // unless the cost is zero

                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, _) = f.literal.ty.kind {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += CALL_PENALTY;
                        cost += CALL_PENALTY;
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                _ => cost += INSTR_COST,

                for &succ in term.successors() {
                    work_list.push(succ);
        // Count up the cost of local variables and temps; if we know the size,
        // use that, otherwise we use a moderately-large dummy cost.
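        // For instance (illustration added, not in the original source): on a 64-bit
        // target a `[u64; 4]` local contributes 32 / 8 = 4 to the cost, while a local
        // whose layout cannot be computed contributes UNKNOWN_SIZE_COST instead.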
        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let v = &callee_body.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.substs);
            // Cost of the var is the size in machine-words, if we know it.
            if let Some(size) = type_size_of(tcx, param_env, ty) {
                cost += (size / ptr_size) as usize;
                cost += UNKNOWN_SIZE_COST;

        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            if cost <= threshold {
                debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
                debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);

        callsite: CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        mut callee_body: Body<'tcx>,

        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("inlined {:?} into {:?}", callsite.callee, self.source);

                let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
                let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());

                for mut scope in callee_body.source_scopes.iter().cloned() {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
                        // FIXME(eddyb) is this really needed?
                        // (also note that it's always overwritten below)
                        scope.span = callee_body.span;

                    // FIXME(eddyb) this doesn't seem right at all.
                    // The inlined source scopes should probably be annotated as
                    // such, but also contain all of the original information.
                    scope.span = callsite.location.span;

                    let idx = caller_body.source_scopes.push(scope);

                for loc in callee_body.vars_and_temps_iter() {
                    let mut local = callee_body.local_decls[loc].clone();

                    local.source_info.scope = scope_map[local.source_info.scope];
                    local.source_info.span = callsite.location.span;

                    let idx = caller_body.local_decls.push(local);

                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
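                // Sketched in MIR terms (added illustration, not from the original
                // comment), the rewrite below is roughly:
                //     _tmp = &mut a[*i];    // borrow the destination once, up front
                //     ...                   // inlined body then writes through (*_tmp)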
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,

                let dest = if dest_needs_borrow(destination.0) {
                    debug!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },

                    let ty = dest.ty(caller_body, self.tcx);

                    let temp = LocalDecl::new(ty, callsite.location.span);

                    let tmp = caller_body.local_decls.push(temp);
                    let tmp = Place::from(tmp);

                    let stmt = Statement {
                        source_info: callsite.location,
                        kind: StatementKind::Assign(box (tmp, dest)),
                    caller_body[callsite.bb].statements.push(stmt);
                    self.tcx.mk_place_deref(tmp)

                let return_block = destination.1;

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);

                let bb_len = caller_body.basic_blocks().len();
                let mut integrator = Integrator {
                    cleanup_block: cleanup,
                    in_cleanup_block: false,

                for mut var_debug_info in callee_body.var_debug_info.drain(..) {
                    integrator.visit_var_debug_info(&mut var_debug_info);
                    caller_body.var_debug_info.push(var_debug_info);

                for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
                    integrator.visit_basic_block_data(bb, &mut block);
                    caller_body.basic_blocks_mut().push(block);

                let terminator = Terminator {
                    source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) },

                caller_body[callsite.bb].terminator = Some(terminator);

                caller_body[callsite.bb].terminator =
                    Some(Terminator { source_info: terminator.source_info, kind });

        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if tcx.is_closure(callsite.callee) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind {
                bug!("Closure arguments are not passed as a tuple");

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);
            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)

            closure_ref_arg.chain(tuple_tmp_args).collect()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,

        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.
        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    // Reuse the operand if it's a temporary already

        debug!("creating temp for argument {:?}", arg);
        // Otherwise, create a temporary for the arg
        let arg = Rvalue::Use(arg);

        let ty = arg.ty(caller_body, self.tcx);

        let arg_tmp = LocalDecl::new(ty, callsite.location.span);
        let arg_tmp = caller_body.local_decls.push(arg_tmp);

        let stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
        caller_body[callsite.bb].statements.push(stmt);

fn type_size_of<'tcx>(
    param_env: ty::ParamEnv<'tcx>,
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())

 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow

struct Integrator<'a, 'tcx> {
    local_map: IndexVec<Local, Local>,
    scope_map: IndexVec<SourceScope, SourceScope>,
    destination: Place<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(tgt.index() + self.block_idx);
        debug!("updating target `{:?}`, new: `{:?}`", tgt, new);

    fn make_integrate_local(&self, local: Local) -> Local {
        if local == RETURN_PLACE {
            return self.destination.local;

        let idx = local.index() - 1;
        if idx < self.args.len() {
            return self.args[idx];

        self.local_map[Local::new(idx - self.args.len())]

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.make_integrate_local(*local);

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);

        // Handles integrating any locals that occur in the base
        self.super_place(place, context, location)

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;

    fn visit_terminator_kind(&mut self, kind: &mut TerminatorKind<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(kind, TerminatorKind::Return) {
            self.super_terminator_kind(kind, loc);

            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.update_target(*target);
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                    *tgt = self.update_target(*tgt);
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.update_target(*tgt);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
            TerminatorKind::Return => {
                *kind = TerminatorKind::Goto { target: self.return_block };
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    *kind = TerminatorKind::Goto { target: tgt }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdges { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.update_target(*real_target);
                *imaginary_target = self.update_target(*imaginary_target);
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
                bug!("False unwinds should have been removed before inlining")

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.scope_map[*scope];