// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Inlining pass for MIR functions

use rustc::hir;
use rustc::hir::def_id::DefId;

use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};

use rustc::mir::*;
use rustc::mir::visit::*;
use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::ty::subst::{Subst, Substs};

use std::collections::VecDeque;
use std::iter;

use transform::{MirPass, MirSource};
use super::simplify::{remove_dead_blocks, CfgSimplifier};

use syntax::attr;
use syntax::abi::Abi;

const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;

const UNKNOWN_SIZE_COST: usize = 10;
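
// Rough shape of the cost model below: counted statements and ordinary
// terminators cost INSTR_COST; calls, asserts, and drops of types that
// actually need dropping cost CALL_PENALTY; locals of unknown layout cost
// UNKNOWN_SIZE_COST. The total is compared against DEFAULT_THRESHOLD, or
// HINT_THRESHOLD for #[inline]-hinted callees.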

pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: DefId,
    substs: &'tcx Substs<'tcx>,
    bb: BasicBlock,
    location: SourceInfo,
}

impl MirPass for Inline {
    fn run_pass<'a, 'tcx>(&self,
                          tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          source: MirSource,
                          mir: &mut Mir<'tcx>) {
        // Inlining is only performed at -Z mir-opt-level=2 or higher.
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            Inliner { tcx, source }.run_pass(mir);
        }
    }
}

struct Inliner<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    source: MirSource,
}

impl<'a, 'tcx> Inliner<'a, 'tcx> {
    fn run_pass(&self, caller_mir: &mut Mir<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try and fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // they do not call us. Otherwise, we just don't try to
        // inline.
        //
        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this
        // file. =)

        let mut callsites = VecDeque::new();

        let param_env = self.tcx.param_env(self.source.def_id);

        // Only do inlining into fn bodies.
        let id = self.tcx.hir.as_local_node_id(self.source.def_id).unwrap();
        let body_owner_kind = self.tcx.hir.body_owner_kind(id);
        if let (hir::BodyOwnerKind::Fn, None) = (body_owner_kind, self.source.promoted) {

            for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() {
                // Don't inline calls that are in cleanup blocks.
                if bb_data.is_cleanup { continue; }

                // Only consider direct calls to functions
                let terminator = bb_data.terminator();
                if let TerminatorKind::Call {
                    func: Operand::Constant(ref f), .. } = terminator.kind {
                    if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
                        if let Some(instance) = Instance::resolve(self.tcx,
                                                                  param_env,
                                                                  callee_def_id,
                                                                  substs) {
                            callsites.push_back(CallSite {
                                callee: instance.def_id(),
                                substs: instance.substs,
                                bb,
                                location: terminator.source_info
                            });
                        }
                    }
                }
            }
        } else {
            return;
        }
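
        // At this point `callsites` holds every direct call in the original
        // body that is not in a cleanup block; the loop below drains the
        // queue, enqueueing further candidates discovered inside successfully
        // inlined bodies.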

        let mut local_change;
        let mut changed = false;

        loop {
            local_change = false;
            while let Some(callsite) = callsites.pop_front() {
                debug!("checking whether to inline callsite {:?}", callsite);
                if !self.tcx.is_mir_available(callsite.callee) {
                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
                    continue;
                }

                let callee_mir = match ty::queries::optimized_mir::try_get(self.tcx,
                                                                           callsite.location.span,
                                                                           callsite.callee) {
                    Ok(ref callee_mir) if self.should_inline(callsite, callee_mir) => {
                        subst_and_normalize(callee_mir, self.tcx, &callsite.substs, param_env)
                    }
                    Ok(_) => continue,

                    Err(mut bug) => {
                        // FIXME(#43542) shouldn't have to cancel an error
                        bug.cancel();
                        continue
                    }
                };

                let start = caller_mir.basic_blocks().len();
                debug!("attempting to inline callsite {:?} - mir={:?}", callsite, callee_mir);
                if !self.inline_call(callsite, caller_mir, callee_mir) {
                    debug!("attempting to inline callsite {:?} - failure", callsite);
                    continue;
                }
                debug!("attempting to inline callsite {:?} - success", callsite);

                // Add callsites from inlined function
                for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) {
                    // Only consider direct calls to functions
                    let terminator = bb_data.terminator();
                    if let TerminatorKind::Call {
                        func: Operand::Constant(ref f), .. } = terminator.kind {
                        if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
                            // Don't inline the same function multiple times.
                            if callsite.callee != callee_def_id {
                                callsites.push_back(CallSite {
                                    callee: callee_def_id,
                                    substs,
                                    bb,
                                    location: terminator.source_info
                                });
                            }
                        }
                    }
                }

                local_change = true;
                changed = true;
            }

            if !local_change {
                break;
            }
        }

        // Simplify if we inlined anything.
        if changed {
            debug!("Running simplify cfg on {:?}", self.source);
            CfgSimplifier::new(caller_mir).simplify();
            remove_dead_blocks(caller_mir);
        }
    }

    fn should_inline(&self,
                     callsite: CallSite<'tcx>,
                     callee_mir: &Mir<'tcx>)
                     -> bool
    {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        // Don't inline closures that have captures
        // FIXME: Handle closures better
        if callee_mir.upvar_decls.len() > 0 {
            debug!("    upvar decls present - not inlining");
            return false;
        }

        // Cannot inline generators which haven't been transformed yet
        if callee_mir.yield_ty.is_some() {
            debug!("    yield ty present - not inlining");
            return false;
        }

        let attrs = tcx.get_attrs(callsite.callee);
        let hint = attr::find_inline_attr(None, &attrs[..]);

        let hinted = match hint {
            // Just treat inline(always) as a hint for now,
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("#[inline(never)] present - not inlining");
                return false
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.is_local() {
            if callsite.substs.types().count() == 0 && !hinted {
                debug!("    callee is an exported function - not inlining");
                return false;
            }
        }

        let mut threshold = if hinted {
            HINT_THRESHOLD
        } else {
            DEFAULT_THRESHOLD
        };

        // Significantly lower the threshold for inlining cold functions
        if attr::contains_name(&attrs[..], "cold") {
            threshold /= 5;
        }

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_mir.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);
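
        // Worked example of the arithmetic above: an #[inline] hint starts at
        // HINT_THRESHOLD = 100; #[cold] lowers that to 100 / 5 = 20; a body
        // with <= 3 blocks then raises it to 20 + 20 / 4 = 25.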

        // FIXME: Give a bonus to functions with only a single caller

        let param_env = tcx.param_env(self.source.def_id);

        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitVector::new(callee_mir.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) { continue; }
            let blk = &callee_mir.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_) |
                    StatementKind::StorageDead(_) |
                    StatementKind::Nop => {}
                    _ => cost += INSTR_COST
                }
            }

            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref location, target, unwind } |
                TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the location doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = location.ty(callee_mir, tcx).subst(tcx, callsite.substs);
                    let ty = ty.to_ty(tcx);
                    if ty.needs_drop(tcx, param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }

                TerminatorKind::Unreachable |
                TerminatorKind::Call { destination: None, .. } if first_block => {
                    // If the function always diverges, don't inline
                    // unless the cost is zero
                    threshold = 0;
                }

                TerminatorKind::Call { func: Operand::Constant(ref f), .. } => {
                    if let ty::TyFnDef(def_id, _) = f.ty.sty {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    }
                }
                TerminatorKind::Assert { .. } => cost += CALL_PENALTY,
                _ => cost += INSTR_COST
            }

            if !is_drop {
                for &succ in &term.successors()[..] {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }
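
        // For a sense of scale: a minimal two-block callee (one Assign, a
        // Call terminator, then a Return) accumulates INSTR_COST +
        // CALL_PENALTY + INSTR_COST = 35 here, before the per-local size
        // costs added below.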

        // Count up the cost of local variables and temps. If we know the
        // size, use that; otherwise we use a moderately-large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_mir.vars_and_temps_iter() {
            let v = &callee_mir.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.substs);
            // Cost of the var is the size in machine-words, if we know
            // the size.
            if let Some(size) = type_size_of(tcx, param_env.clone(), ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }

        if let attr::InlineAttr::Always = hint {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            true
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            true
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            false
        }
    }

    fn inline_call(&self,
                   callsite: CallSite<'tcx>,
                   caller_mir: &mut Mir<'tcx>,
                   mut callee_mir: Mir<'tcx>) -> bool {
        let terminator = caller_mir[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("Inlined {:?} into {:?}", callsite.callee, self.source);

                let is_box_free = Some(callsite.callee) == self.tcx.lang_items().box_free_fn();

                let mut local_map = IndexVec::with_capacity(callee_mir.local_decls.len());
                let mut scope_map = IndexVec::with_capacity(callee_mir.visibility_scopes.len());
                let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len());

                for mut scope in callee_mir.visibility_scopes.iter().cloned() {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
                        scope.span = callee_mir.span;
                    }

                    scope.span = callsite.location.span;

                    let idx = caller_mir.visibility_scopes.push(scope);
                    scope_map.push(idx);
                }

                for loc in callee_mir.vars_and_temps_iter() {
                    let mut local = callee_mir.local_decls[loc].clone();

                    local.source_info.scope = scope_map[local.source_info.scope];
                    local.source_info.span = callsite.location.span;

                    let idx = caller_mir.local_decls.push(local);
                    local_map.push(idx);
                }

                for p in callee_mir.promoted.iter().cloned() {
                    let idx = caller_mir.promoted.push(p);
                    promoted_map.push(idx);
                }
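
                // These maps are consumed by the Integrator below, which
                // rewrites every local, scope, and promoted index in the
                // callee body to its caller-side equivalent.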

                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Lvalue could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the lvalue and pass the destination as `*temp` instead.
                fn dest_needs_borrow(lval: &Lvalue) -> bool {
                    match *lval {
                        Lvalue::Projection(ref p) => {
                            match p.elem {
                                ProjectionElem::Deref |
                                ProjectionElem::Index(_) => true,
                                _ => dest_needs_borrow(&p.base)
                            }
                        }

                        // Static variables need a borrow because the callee
                        // might modify the same static.
                        Lvalue::Static(_) => true,

                        _ => false
                    }
                }

                let dest = if dest_needs_borrow(&destination.0) {
                    debug!("Creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.types.re_erased,
                        BorrowKind::Mut,
                        destination.0);

                    let ty = dest.ty(caller_mir, self.tcx);

                    let temp = LocalDecl::new_temp(ty, callsite.location.span);

                    let tmp = caller_mir.local_decls.push(temp);
                    let tmp = Lvalue::Local(tmp);

                    let stmt = Statement {
                        source_info: callsite.location,
                        kind: StatementKind::Assign(tmp.clone(), dest)
                    };
                    caller_mir[callsite.bb]
                        .statements.push(stmt);
                    tmp.deref()
                } else {
                    destination.0
                };

                let return_block = destination.1;
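
                // Sketch of the borrow above (names illustrative): for a call
                // like `a[*i] = f(i)` we emit
                //
                //     tmp = &mut a[*i];
                //
                // ahead of the inlined body, which then writes through
                // `(*tmp)`, so the destination location is computed only once.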

                let args : Vec<_> = if is_box_free {
                    assert!(args.len() == 1);
                    // box_free takes a Box, but is defined with a *mut T, inlining
                    // needs to generate the cast.
                    // FIXME: we should probably just generate correct MIR in the first place...

                    let arg = if let Operand::Move(ref lval) = args[0] {
                        lval.clone()
                    } else {
                        bug!("Constant arg to \"box_free\"");
                    };

                    let ptr_ty = args[0].ty(caller_mir, self.tcx);
                    vec![self.cast_box_free_arg(arg, ptr_ty, &callsite, caller_mir)]
                } else {
                    // Copy the arguments if needed.
                    self.make_call_args(args, &callsite, caller_mir)
                };

                let bb_len = caller_mir.basic_blocks().len();
                let mut integrator = Integrator {
                    block_idx: bb_len,
                    args: &args,
                    local_map,
                    scope_map,
                    promoted_map,
                    _callsite: callsite,
                    destination: dest,
                    return_block,
                    cleanup_block: cleanup,
                    in_cleanup_block: false
                };

                for (bb, mut block) in callee_mir.basic_blocks_mut().drain_enumerated(..) {
                    integrator.visit_basic_block_data(bb, &mut block);
                    caller_mir.basic_blocks_mut().push(block);
                }

                let terminator = Terminator {
                    source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }
                };

                caller_mir[callsite.bb].terminator = Some(terminator);

                true
            }
            kind => {
                // Not an inlinable call; put the original terminator back.
                caller_mir[callsite.bb].terminator = Some(Terminator {
                    source_info: terminator.source_info,
                    kind,
                });
                false
            }
        }
    }

    fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>,
                         callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Local {
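        // Net effect at the callsite, roughly (names illustrative):
        //
        //     ref_tmp = &mut (*arg);          // reborrow the boxed contents
        //     cast_tmp = ref_tmp as *mut T;   // Misc cast to a raw pointer
        //
        // so the inlined box_free body receives `cast_tmp` as its argument.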
        let arg = Rvalue::Ref(
            self.tcx.types.re_erased,
            BorrowKind::Mut,
            arg.deref());

        let ty = arg.ty(caller_mir, self.tcx);
        let ref_tmp = LocalDecl::new_temp(ty, callsite.location.span);
        let ref_tmp = caller_mir.local_decls.push(ref_tmp);
        let ref_tmp = Lvalue::Local(ref_tmp);

        let ref_stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(ref_tmp.clone(), arg)
        };

        caller_mir[callsite.bb]
            .statements.push(ref_stmt);

        let pointee_ty = match ptr_ty.sty {
            ty::TyRawPtr(tm) | ty::TyRef(_, tm) => tm.ty,
            _ if ptr_ty.is_box() => ptr_ty.boxed_ty(),
            _ => bug!("Invalid type `{:?}` for call to box_free", ptr_ty)
        };
        let ptr_ty = self.tcx.mk_mut_ptr(pointee_ty);

        let raw_ptr = Rvalue::Cast(CastKind::Misc, Operand::Move(ref_tmp), ptr_ty);

        let cast_tmp = LocalDecl::new_temp(ptr_ty, callsite.location.span);
        let cast_tmp = caller_mir.local_decls.push(cast_tmp);

        let cast_stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(Lvalue::Local(cast_tmp), raw_ptr)
        };

        caller_mir[callsite.bb]
            .statements.push(cast_stmt);

        cast_tmp
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_mir: &mut Mir<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, trans has
        // the job of unpacking this tuple. But here, we are trans. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if tcx.is_closure(callsite.callee) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
            assert!(args.next().is_none());

            let tuple = Lvalue::Local(tuple);
            let tuple_tys = if let ty::TyTuple(s, _) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args =
                tuple_tys.iter().enumerate().map(|(i, ty)| {
                    // This is e.g. `tuple_tmp.0` in our example above.
                    let tuple_field = Operand::Move(tuple.clone().field(Field::new(i), ty));

                    // Spill to a local to make e.g. `tmp0`.
                    self.create_temp_if_necessary(tuple_field, callsite, caller_mir)
                });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_mir))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_mir: &mut Mir<'tcx>,
    ) -> Local {
        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.

        if let Operand::Move(Lvalue::Local(local)) = arg {
            if caller_mir.local_kind(local) == LocalKind::Temp {
                // Reuse the operand if it's a temporary already
                return local;
            }
        }

        debug!("Creating temp for argument {:?}", arg);
        // Otherwise, create a temporary for the arg
        let arg = Rvalue::Use(arg);

        let ty = arg.ty(caller_mir, self.tcx);

        let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
        let arg_tmp = caller_mir.local_decls.push(arg_tmp);

        let stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(Lvalue::Local(arg_tmp), arg),
        };
        caller_mir[callsite.bb].statements.push(stmt);
        arg_tmp
    }
}
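
/// Returns the size of `ty` in bytes, if its layout can be computed.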
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          param_env: ty::ParamEnv<'tcx>,
                          ty: Ty<'tcx>) -> Option<u64> {
    (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes())
}
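
/// Applies the callsite's substitutions to the callee MIR and normalizes
/// associated types, so that the body spliced into the caller is expressed
/// entirely in the caller's type environment.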
fn subst_and_normalize<'a, 'tcx: 'a>(
    mir: &Mir<'tcx>,
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    substs: &'tcx ty::subst::Substs<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
) -> Mir<'tcx> {
    struct Folder<'a, 'tcx: 'a> {
        tcx: TyCtxt<'a, 'tcx, 'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        substs: &'tcx ty::subst::Substs<'tcx>,
    }
    impl<'a, 'tcx: 'a> ty::fold::TypeFolder<'tcx, 'tcx> for Folder<'a, 'tcx> {
        fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
            self.tcx
        }

        fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
            self.tcx.trans_apply_param_substs_env(&self.substs, self.param_env, &t)
        }
    }
    let mut f = Folder { tcx, param_env, substs };
    mir.fold_with(&mut f)
}

/**
 * Integrates blocks from the callee function into the calling function.
 *
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx: 'a> {
    block_idx: usize,
    args: &'a [Local],
    local_map: IndexVec<Local, Local>,
    scope_map: IndexVec<VisibilityScope, VisibilityScope>,
    promoted_map: IndexVec<Promoted, Promoted>,
    _callsite: CallSite<'tcx>,
    destination: Lvalue<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
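        // E.g. with 10 caller blocks already present (block_idx = 10),
        // callee block bb2 becomes bb12 once spliced into the caller.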
        let new = BasicBlock::new(tgt.index() + self.block_idx);
        debug!("Updating target `{:?}`, new: `{:?}`", tgt, new);
        new
    }
}

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn visit_local(&mut self,
                   local: &mut Local,
                   _ctxt: LvalueContext<'tcx>,
                   _location: Location) {
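        // MIR numbers locals as: 0 is the return pointer, 1..=arg_count are
        // the arguments, and vars/temps follow. The index arithmetic below
        // relies on that layout.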
        if *local == RETURN_POINTER {
            match self.destination {
                Lvalue::Local(l) => {
                    *local = l;
                    return;
                },
                ref lval => bug!("Return lvalue is {:?}, not local", lval)
            }
        }
        let idx = local.index() - 1;
        if idx < self.args.len() {
            *local = self.args[idx];
            return;
        }
        *local = self.local_map[Local::new(idx - self.args.len())];
    }

    fn visit_lvalue(&mut self,
                    lvalue: &mut Lvalue<'tcx>,
                    _ctxt: LvalueContext<'tcx>,
                    _location: Location) {
        if let Lvalue::Local(RETURN_POINTER) = *lvalue {
            // Return pointer; update the lvalue itself
            *lvalue = self.destination.clone();
        } else {
            self.super_lvalue(lvalue, _ctxt, _location);
        }
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_terminator_kind(&mut self, block: BasicBlock,
                             kind: &mut TerminatorKind<'tcx>, loc: Location) {
        self.super_terminator_kind(block, kind, loc);

        match *kind {
            // should_inline() refuses generators, so these cannot appear here.
            TerminatorKind::GeneratorDrop |
            TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.update_target(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets {
                    *tgt = self.update_target(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. } |
            TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.update_target(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                // Returns from the callee become jumps to the block after the
                // original call in the caller.
                *kind = TerminatorKind::Goto { target: self.return_block };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    *kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Unreachable => { }
            TerminatorKind::FalseEdges { ref mut real_target, ref mut imaginary_targets } => {
                *real_target = self.update_target(*real_target);
                for target in imaginary_targets {
                    *target = self.update_target(*target);
                }
            }
        }
    }

    fn visit_visibility_scope(&mut self, scope: &mut VisibilityScope) {
        *scope = self.scope_map[*scope];
    }

    fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
        if let Literal::Promoted { ref mut index } = *literal {
            if let Some(p) = self.promoted_map.get(*index).cloned() {
                *index = p;
            }
        } else {
            self.super_literal(literal, loc);
        }
    }
}