//! Inlining pass for MIR functions

use rustc_attr as attr;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::{Subst, SubstsRef};
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::{MirPass, MirSource};
use std::collections::VecDeque;
use std::iter;
const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
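
// Rough, illustrative example of how these knobs combine: a callee with ten
// ordinary statements (10 * INSTR_COST = 50) plus a single call
// (CALL_PENALTY = 25) already exceeds DEFAULT_THRESHOLD, so it would only be
// inlined if an `#[inline]` hint raises the limit to HINT_THRESHOLD. The
// exact accounting is done in `should_inline` below.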

pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: DefId,
    substs: SubstsRef<'tcx>,
    bb: BasicBlock,
    location: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            if tcx.sess.opts.debugging_opts.instrument_coverage {
                // The current implementation of source code coverage injects code region counters
                // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code-
                // based coverage regions. Inlining would invalidate that correspondence, so the
                // pass is skipped when coverage instrumentation is enabled.
                debug!("function inlining is disabled when compiling with `instrument_coverage`");
            } else {
                Inliner { tcx, source, codegen_fn_attrs: tcx.codegen_fn_attrs(source.def_id()) }
                    .run_pass(body);
            }
        }
    }
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    source: MirSource<'tcx>,
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
}

impl<'tcx> Inliner<'tcx> {
    fn run_pass(&self, caller_body: &mut Body<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try and fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // they do not call us. Otherwise, we just don't try to
        // inline.
        //
        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this
        // pass.
        let mut callsites = VecDeque::new();

        let param_env = self.tcx.param_env_reveal_all_normalized(self.source.def_id());

        // Only do inlining into fn bodies.
        let id = self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
        if self.tcx.hir().body_owner_kind(id).is_fn_or_closure() && self.source.promoted.is_none() {
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
                if let Some(callsite) =
                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                {
                    callsites.push_back(callsite);
                }
            }
        } else {
            return;
        }

        let mut changed = false;

        while let Some(callsite) = callsites.pop_front() {
            debug!("checking whether to inline callsite {:?}", callsite);
            if !self.tcx.is_mir_available(callsite.callee) {
                debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
                continue;
            }

            let callee_body = if let Some(callee_def_id) = callsite.callee.as_local() {
                let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
                let self_hir_id =
                    self.tcx.hir().local_def_id_to_hir_id(self.source.def_id().expect_local());
                // Avoid a cycle here by using `optimized_mir` only if we have
                // a lower `HirId` than the callee. This ensures that the callee will
                // not inline us. This trick only works without incremental compilation.
                // So don't do it if that is enabled. Also avoid inlining into generators,
                // since their `optimized_mir` is used for layout computation, which can
                // create a cycle, even when no attempt is made to inline the function
                // in the other direction.
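                //
                // Purely illustrative example: if `f` has a lower `HirId` than `g`,
                // optimizing `f` may request `optimized_mir(g)` here, but when `g` is
                // optimized it will refuse to request `optimized_mir(f)`, so the two
                // queries can never form a cycle.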
                if !self.tcx.dep_graph.is_fully_enabled()
                    && self_hir_id < callee_hir_id
                    && caller_body.generator_kind.is_none()
                {
                    self.tcx.optimized_mir(callsite.callee)
                } else {
                    continue;
                }
            } else {
                // This cannot result in a cycle since the callee MIR is from another crate
                // and is already optimized.
                self.tcx.optimized_mir(callsite.callee)
            };

            let callee_body = if self.consider_optimizing(callsite, callee_body) {
                self.tcx.subst_and_normalize_erasing_regions(

            // Copy only unevaluated constants from the callee_body into the caller_body.
            // Although we are only pushing `ConstKind::Unevaluated` consts to
            // `required_consts`, here we may not only have `ConstKind::Unevaluated`
            // because we are calling `subst_and_normalize_erasing_regions`.
            caller_body.required_consts.extend(
                callee_body.required_consts.iter().copied().filter(|&constant| {
                    matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
                }),
            );

            let start = caller_body.basic_blocks().len();
            debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
            if !self.inline_call(callsite, caller_body, callee_body) {
                debug!("attempting to inline callsite {:?} - failure", callsite);
                continue;
            }
            debug!("attempting to inline callsite {:?} - success", callsite);

            // Add callsites from inlined function
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
                if let Some(new_callsite) =
                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                {
                    // Don't inline the same function multiple times.
                    if callsite.callee != new_callsite.callee {
                        callsites.push_back(new_callsite);
                    }
                }
            }

            changed = true;
        }

        // Simplify if we inlined anything.
        if changed {
            debug!("running simplify cfg on {:?}", self.source);
            CfgSimplifier::new(caller_body).simplify();
            remove_dead_blocks(caller_body);
        }
    }

    fn get_valid_function_call(
        &self,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
        caller_body: &Body<'tcx>,
        param_env: ParamEnv<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Don't inline calls that are in cleanup blocks.
        if bb_data.is_cleanup {
            return None;
        }

        // Only consider direct calls to functions
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
            if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() {
                let instance =
                    Instance::resolve(self.tcx, param_env, callee_def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) = instance.def {
                    return None;
                }

                return Some(CallSite {
                    callee: instance.def_id(),
                    substs: instance.substs,
                    bb,
                    location: terminator.source_info,
                });
            }
        }

        None
    }

    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("consider_optimizing({:?})", callsite);
        self.should_inline(callsite, callee_body)
            && self.tcx.consider_optimizing(|| {
                format!("Inline {:?} into {:?}", callee_body.span, callsite)
            })
    }

    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        // Cannot inline generators which haven't been transformed yet
        if callee_body.yield_ty.is_some() {
            debug!(" yield ty present - not inlining");
            return false;
        }

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);

        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) {
            debug!("`#[track_caller]` present - not inlining");
            return false;
        }

        let self_features = &self.codegen_fn_attrs.target_features;
        let callee_features = &codegen_fn_attrs.target_features;
        if callee_features.iter().any(|feature| !self_features.contains(feature)) {
            debug!("callee has extra target features - not inlining");
            return false;
        }

        let self_no_sanitize =
            self.codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
        let callee_no_sanitize =
            codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
        if self_no_sanitize != callee_no_sanitize {
            debug!("callee has incompatible no_sanitize attribute - not inlining");
            return false;
        }

        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now,
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("`#[inline(never)]` present - not inlining");
                return false;
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.is_local() {
            if callsite.substs.non_erasable_generics().count() == 0 && !hinted {
                debug!(" callee is an exported function - not inlining");
                return false;
            }
        }

        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };

        // Significantly lower the threshold for inlining cold functions
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            threshold /= 5;
        }

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!(" final inline threshold = {}", threshold);
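
        // For instance, an `#[inline]`-hinted callee with at most three basic blocks
        // ends up with a threshold of HINT_THRESHOLD + HINT_THRESHOLD / 4 = 125,
        // while an unhinted one gets DEFAULT_THRESHOLD + DEFAULT_THRESHOLD / 4 = 62.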

        // FIXME: Give a bonus to functions with only a single caller

        let param_env = tcx.param_env(self.source.def_id());

        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }
            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }
            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref place, target, unwind }
                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the place doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = place.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
                    if ty.needs_drop(tcx, param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);
                        }
                    }
                }

                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    if first_block =>
                {
                    // If the function always diverges, don't inline
                    // unless the cost is zero
                    threshold = 0;
                }

                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, _) = *f.literal.ty.kind() {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    } else {
                        cost += CALL_PENALTY;
                    }
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;

                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }

        // Count up the cost of local variables and temps; if we know the size,
        // use that, otherwise we use a moderately-large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();
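
        // For example, on a 64-bit target (ptr_size = 8) a 32-byte local adds
        // 32 / 8 = 4 to the cost, while a local whose layout cannot be computed
        // falls back to UNKNOWN_SIZE_COST below.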

        for v in callee_body.vars_and_temps_iter() {
            let v = &callee_body.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.substs);
            // Cost of the var is the size in machine-words, if we know
            // it.
            if let Some(size) = type_size_of(tcx, param_env, ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }

        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            true
        } else {
            if cost <= threshold {
                debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
                true
            } else {
                debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
                false
            }
        }
    }

    fn inline_call(
        &self,
        callsite: CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        mut callee_body: Body<'tcx>,
    ) -> bool {
        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("inlined {:?} into {:?}", callsite.callee, self.source);

                let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
                let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());

                for mut scope in callee_body.source_scopes.iter().cloned() {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
                        // FIXME(eddyb) is this really needed?
                        // (also note that it's always overwritten below)
                        scope.span = callee_body.span;
                    }

                    // FIXME(eddyb) this doesn't seem right at all.
                    // The inlined source scopes should probably be annotated as
                    // such, but also contain all of the original information.
                    scope.span = callsite.location.span;

                    let idx = caller_body.source_scopes.push(scope);
                    scope_map.push(idx);
                }

                for loc in callee_body.vars_and_temps_iter() {
                    let mut local = callee_body.local_decls[loc].clone();

                    local.source_info.scope = scope_map[local.source_info.scope];
                    local.source_info.span = callsite.location.span;

                    let idx = caller_body.local_decls.push(local);
                    local_map.push(idx);
                }

                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }

                let dest = if dest_needs_borrow(destination.0) {
                    debug!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination.0,
                    );
                    let ty = dest.ty(caller_body, self.tcx);

                    let temp = LocalDecl::new(ty, callsite.location.span);

                    let tmp = caller_body.local_decls.push(temp);
                    let tmp = Place::from(tmp);

                    let stmt = Statement {
                        source_info: callsite.location,
                        kind: StatementKind::Assign(box (tmp, dest)),
                    };
                    caller_body[callsite.bb].statements.push(stmt);
                    self.tcx.mk_place_deref(tmp)
                } else {
                    destination.0
                };

                let return_block = destination.1;

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, return_block);

                let bb_len = caller_body.basic_blocks().len();
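                // The callee's blocks get appended to `caller_body` starting at index
                // `bb_len`, so e.g. callee `bb0` ends up as caller `bb(bb_len)`; see
                // `Integrator::update_target` below. The original call terminator is
                // then replaced by a `Goto` into that first inlined block, and inlined
                // `Return` terminators are rewritten into gotos to `return_block`.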

                let mut integrator = Integrator {
                    block_idx: bb_len,
                    args: &args,
                    local_map,
                    scope_map,
                    destination: dest,
                    return_block,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                };

                for mut var_debug_info in callee_body.var_debug_info.drain(..) {
                    integrator.visit_var_debug_info(&mut var_debug_info);
                    caller_body.var_debug_info.push(var_debug_info);
                }

                for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
                    integrator.visit_basic_block_data(bb, &mut block);
                    caller_body.basic_blocks_mut().push(block);
                }

                let terminator = Terminator {
                    source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) },
                };

                caller_body[callsite.bb].terminator = Some(terminator);

                true
            }
            kind => {
                caller_body[callsite.bb].terminator =
                    Some(Terminator { source_info: terminator.source_info, kind });
                false
            }
        }
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        return_block: BasicBlock,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if tcx.is_closure(callsite.callee) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(
                args.next().unwrap(),
                callsite,
                caller_body,
                return_block,
            );
            let tuple = self.create_temp_if_necessary(
                args.next().unwrap(),
                callsite,
                caller_body,
                return_block,
            );
            assert!(args.next().is_none());
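
            // At this point `self_` is a local holding the closure environment
            // reference and `tuple` is a local holding the packed argument tuple;
            // the code below splits that tuple into one spilled local per field.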

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind() {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field =
                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
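    ///
    /// Illustrative example (hypothetical MIR, locals chosen arbitrarily): spilling
    /// the operand `const 3_i32` at a call in `bb4` pushes
    ///
    ///     StorageLive(_7);
    ///     _7 = const 3_i32;
    ///
    /// onto `bb4`, schedules `StorageDead(_7)` at the start of `return_block`, and
    /// returns `_7`.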
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        return_block: BasicBlock,
    ) -> Local {
        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.

        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    // Reuse the operand if it's a temporary already
                    return local;
                }
            }
        }

        debug!("creating temp for argument {:?}", arg);
        // Otherwise, create a temporary for the arg
        let arg = Rvalue::Use(arg);

        let ty = arg.ty(caller_body, self.tcx);

        let arg_tmp = LocalDecl::new(ty, callsite.location.span);
        let arg_tmp = caller_body.local_decls.push(arg_tmp);

        caller_body[callsite.bb].statements.push(Statement {
            source_info: callsite.location,
            kind: StatementKind::StorageLive(arg_tmp),
        });
        caller_body[callsite.bb].statements.push(Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
        });
        caller_body[return_block].statements.insert(
            0,
            Statement { source_info: callsite.location, kind: StatementKind::StorageDead(arg_tmp) },
        );

        arg_tmp
    }
}

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/// Integrates blocks from the callee function into the calling function.
/// Updates block indices, references to locals and other control flow
/// details.
struct Integrator<'a, 'tcx> {
    block_idx: usize,
    args: &'a [Local],
    local_map: IndexVec<Local, Local>,
    scope_map: IndexVec<SourceScope, SourceScope>,
    destination: Place<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(tgt.index() + self.block_idx);
        debug!("updating target `{:?}`, new: `{:?}`", tgt, new);
        new
    }

    fn make_integrate_local(&self, local: Local) -> Local {
        if local == RETURN_PLACE {
            return self.destination.local;
        }

        let idx = local.index() - 1;
        if idx < self.args.len() {
            return self.args[idx];
        }

        self.local_map[Local::new(idx - self.args.len())]
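        // Illustrative mapping, assuming a callee with two explicit arguments:
        // callee `_0` (the return place) becomes the call destination's local,
        // callee `_1`/`_2` become `args[0]`/`args[1]`, and callee `_3` (its first
        // var/temp) becomes `local_map[0]`, i.e. the first local pushed onto the
        // caller's `local_decls` while the callee's locals were copied over.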
    }
}

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.make_integrate_local(*local);
    }

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base or projections.
        self.super_place(place, context, location)
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.update_target(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets {
                    *tgt = self.update_target(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.update_target(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                terminator.kind = TerminatorKind::Goto { target: self.return_block };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.update_target(*real_target);
                *imaginary_target = self.update_target(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.update_target(*tgt);
                }
            }
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.scope_map[*scope];
    }
}