//! Inlining pass for MIR functions

use rustc::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc::mir::visit::*;
use rustc::mir::*;
use rustc::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable};
use rustc_attr as attr;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::config::Sanitizer;
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::{MirPass, MirSource};
use std::collections::VecDeque;
use std::iter;

const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;

const UNKNOWN_SIZE_COST: usize = 10;
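
// As a rough sense of scale: a callee with four ordinary statements and one
// call to a non-intrinsic function would be costed at
// 4 * INSTR_COST + CALL_PENALTY = 45 (ignoring the per-local size costs added
// below), which fits under DEFAULT_THRESHOLD even without an `#[inline]` hint.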
pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: DefId,
    substs: SubstsRef<'tcx>,
    bb: BasicBlock,
    location: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut BodyAndCache<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            Inliner { tcx, source }.run_pass(body);
        }
    }
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    source: MirSource<'tcx>,
}

impl Inliner<'tcx> {
    fn run_pass(&self, caller_body: &mut BodyAndCache<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try and fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // they do not call us. Otherwise, we just don't try to
        // inline.
        //
        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this
        // module.
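        //
        // For example, if `f` calls `g` and `h`, and `g` in turn calls `k`,
        // the call sites in `g` and `h` are considered before the one in `k`,
        // so call sites near the root are tried before deeply nested ones.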
        let mut callsites = VecDeque::new();

        let mut param_env = self.tcx.param_env(self.source.def_id());

        let substs = &InternalSubsts::identity_for_item(self.tcx, self.source.def_id());

        // For monomorphic functions, we can use `Reveal::All` to resolve specialized instances.
        if !substs.needs_subst() {
            param_env = param_env.with_reveal_all();
        }

        // Only do inlining into fn bodies.
        let id = self.tcx.hir().as_local_hir_id(self.source.def_id()).unwrap();
        if self.tcx.hir().body_owner_kind(id).is_fn_or_closure() && self.source.promoted.is_none() {
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
                if let Some(callsite) =
                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                {
                    callsites.push_back(callsite);
                }
            }
        }

        let mut changed = false;
        while let Some(callsite) = callsites.pop_front() {
            debug!("checking whether to inline callsite {:?}", callsite);
            if !self.tcx.is_mir_available(callsite.callee) {
                debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
                continue;
            }

            let self_node_id = self.tcx.hir().as_local_node_id(self.source.def_id()).unwrap();
            let callee_node_id = self.tcx.hir().as_local_node_id(callsite.callee);

            let callee_body = if let Some(callee_node_id) = callee_node_id {
                // Avoid a cycle here by using `optimized_mir` only if we have
                // a lower node id than the callee. This ensures that the callee will
                // not inline us. This trick only works without incremental compilation.
                // So don't do it if that is enabled.
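                //
                // For instance, with node ids `f = 10` and `g = 20`, `f` may
                // request `optimized_mir(g)`, while `g` sees `20 < 10` fail and
                // skips the call site in the other direction, so the two
                // queries can never wait on each other.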
                if !self.tcx.dep_graph.is_fully_enabled()
                    && self_node_id.as_u32() < callee_node_id.as_u32()
                {
                    self.tcx.optimized_mir(callsite.callee)
                } else {
                    continue;
                }
            } else {
                // This cannot result in a cycle since the callee MIR is from another crate
                // and is already optimized.
                self.tcx.optimized_mir(callsite.callee)
            };
            let callee_body = if self.consider_optimizing(callsite, callee_body) {
                self.tcx.subst_and_normalize_erasing_regions(
                    &callsite.substs,
                    param_env,
                    callee_body,
                )
            } else {
                continue;
            };
            let start = caller_body.basic_blocks().len();
            debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
            if !self.inline_call(callsite, caller_body, callee_body) {
                debug!("attempting to inline callsite {:?} - failure", callsite);
                continue;
            }
            debug!("attempting to inline callsite {:?} - success", callsite);

            // Add callsites from inlined function
            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
                if let Some(new_callsite) =
                    self.get_valid_function_call(bb, bb_data, caller_body, param_env)
                {
                    // Don't inline the same function multiple times.
                    if callsite.callee != new_callsite.callee {
                        callsites.push_back(new_callsite);
                    }
                }
            }

            changed = true;
        }

        // Simplify if we inlined anything.
        if changed {
            debug!("running simplify cfg on {:?}", self.source);
            CfgSimplifier::new(caller_body).simplify();
            remove_dead_blocks(caller_body);
        }
    }
    fn get_valid_function_call(
        &self,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
        caller_body: &Body<'tcx>,
        param_env: ParamEnv<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Don't inline calls that are in cleanup blocks.
        if bb_data.is_cleanup {
            return None;
        }

        // Only consider direct calls to functions
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
            if let ty::FnDef(callee_def_id, substs) = op.ty(caller_body, self.tcx).kind {
                let instance = Instance::resolve(self.tcx, param_env, callee_def_id, substs)?;

                if let InstanceDef::Virtual(..) = instance.def {
                    return None;
                }

                return Some(CallSite {
                    callee: instance.def_id(),
                    substs: instance.substs,
                    bb,
                    location: terminator.source_info,
                });
            }
        }

        None
    }
    fn consider_optimizing(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("consider_optimizing({:?})", callsite);
        self.should_inline(callsite, callee_body)
            && self.tcx.consider_optimizing(|| {
                format!("Inline {:?} into {:?}", callee_body.span, callsite)
            })
    }
    fn should_inline(&self, callsite: CallSite<'tcx>, callee_body: &Body<'tcx>) -> bool {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        // Cannot inline generators which haven't been transformed yet
        if callee_body.yield_ty.is_some() {
            debug!("    yield ty present - not inlining");
            return false;
        }

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);

        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::TRACK_CALLER) {
            debug!("`#[track_caller]` present - not inlining");
            return false;
        }
        // Avoid inlining functions marked as no_sanitize if a sanitizer is enabled,
        // since instrumentation might be enabled and performed on the caller.
        match self.tcx.sess.opts.debugging_opts.sanitizer {
            Some(Sanitizer::Address) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_ADDRESS) {
                    return false;
                }
            }
            Some(Sanitizer::Memory) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_MEMORY) {
                    return false;
                }
            }
            Some(Sanitizer::Thread) => {
                if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_SANITIZE_THREAD) {
                    return false;
                }
            }
            Some(Sanitizer::Leak) => {}
            None => {}
        }
        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now,
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("`#[inline(never)]` present - not inlining");
                return false;
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };
        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
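        //
        // For example, a plain local `fn helper()` (non-generic, no `#[inline]`)
        // is rejected here: unlike cross-crate-inlinable functions, its body may
        // reference items that are never exported from this crate.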
        if callsite.callee.is_local() {
            if callsite.substs.non_erasable_generics().count() == 0 && !hinted {
                debug!("    callee is an exported function - not inlining");
                return false;
            }
        }
        let mut threshold = if hinted { HINT_THRESHOLD } else { DEFAULT_THRESHOLD };

        // Significantly lower the threshold for inlining cold functions
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            threshold /= 5;
        }

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
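        // To illustrate the arithmetic: an `#[inline]`-hinted callee with three
        // blocks gets a threshold of 100 + 100 / 4 = 125, while an unhinted
        // `#[cold]` one of the same shape gets 50 / 5 + (50 / 5) / 4 = 12.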
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller

        let param_env = tcx.param_env(self.source.def_id());

        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }
            let blk = &callee_body.basic_blocks()[bb];
            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }
            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref location, target, unwind }
                | TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the location doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = location.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
                    if ty.needs_drop(tcx, param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }
                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    if first_block =>
                {
                    // If the function always diverges, don't inline
                    // unless the cost is zero
                    threshold = 0;
                }
                TerminatorKind::Call { func: Operand::Constant(ref f), .. } => {
                    if let ty::FnDef(def_id, _) = f.literal.ty.kind {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    }
                }
                TerminatorKind::Assert { .. } => cost += CALL_PENALTY,
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }
        // Count up the cost of local variables and temps: if we know the size,
        // use that; otherwise we use a moderately-large dummy cost.
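        //
        // For example, on a 64-bit target a local of type `[u64; 4]` (32 bytes)
        // adds 32 / 8 = 4 to the cost, while a local whose layout cannot be
        // computed adds UNKNOWN_SIZE_COST.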
        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let v = &callee_body.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.substs);
            // Cost of the var is the size in machine-words, if we know
            // the size.
            if let Some(size) = type_size_of(tcx, param_env, ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }
        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            true
        } else {
            if cost <= threshold {
                debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
                true
            } else {
                debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
                false
            }
        }
    }
    fn inline_call(
        &self,
        callsite: CallSite<'tcx>,
        caller_body: &mut BodyAndCache<'tcx>,
        mut callee_body: BodyAndCache<'tcx>,
    ) -> bool {
        let terminator = caller_body[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("inlined {:?} into {:?}", callsite.callee, self.source);
                let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
                let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());

                for mut scope in callee_body.source_scopes.iter().cloned() {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
                        // FIXME(eddyb) is this really needed?
                        // (also note that it's always overwritten below)
                        scope.span = callee_body.span;
                    }

                    // FIXME(eddyb) this doesn't seem right at all.
                    // The inlined source scopes should probably be annotated as
                    // such, but also contain all of the original information.
                    scope.span = callsite.location.span;

                    let idx = caller_body.source_scopes.push(scope);
                    scope_map.push(idx);
                }

                for loc in callee_body.vars_and_temps_iter() {
                    let mut local = callee_body.local_decls[loc].clone();

                    local.source_info.scope = scope_map[local.source_info.scope];
                    local.source_info.span = callsite.location.span;

                    let idx = caller_body.local_decls.push(local);
                    local_map.push(idx);
                }
                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: &Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }
                let dest = if dest_needs_borrow(&destination.0) {
                    debug!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination.0,
                    );

                    let ty = dest.ty(&**caller_body, self.tcx);

                    let temp = LocalDecl::new_temp(ty, callsite.location.span);

                    let tmp = caller_body.local_decls.push(temp);
                    let tmp = Place::from(tmp);

                    let stmt = Statement {
                        source_info: callsite.location,
                        kind: StatementKind::Assign(box (tmp, dest)),
                    };
                    caller_body[callsite.bb].statements.push(stmt);
                    self.tcx.mk_place_deref(tmp)
                } else {
                    destination.0
                };

                let return_block = destination.1;
                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);

                let bb_len = caller_body.basic_blocks().len();
                let mut integrator = Integrator {
                    block_idx: bb_len,
                    args: &args,
                    local_map,
                    scope_map,
                    destination: dest,
                    return_block,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                };

                for mut var_debug_info in callee_body.var_debug_info.drain(..) {
                    integrator.visit_var_debug_info(&mut var_debug_info);
                    caller_body.var_debug_info.push(var_debug_info);
                }

                for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
                    integrator.visit_basic_block_data(bb, &mut block);
                    caller_body.basic_blocks_mut().push(block);
                }

                let terminator = Terminator {
                    source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) },
                };

                caller_body[callsite.bb].terminator = Some(terminator);

                true
            }
            kind => {
                caller_body[callsite.bb].terminator =
                    Some(Terminator { source_info: terminator.source_info, kind });
                false
            }
        }
    }
    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut BodyAndCache<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;
        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if tcx.is_closure(callsite.callee) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(&**caller_body, tcx).ty.kind {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field =
                    Operand::Move(tcx.mk_place_field(tuple.clone(), Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    }
    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
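    ///
    /// For example (hypothetical MIR), an argument passed as `move _2.0` is not
    /// itself a temporary local, so a fresh temporary `_7 = move _2.0` is pushed
    /// onto the call site's block and `_7` is returned instead.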
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut BodyAndCache<'tcx>,
    ) -> Local {
        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.

        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    // Reuse the operand if it's a temporary already
                    return local;
                }
            }
        }

        debug!("creating temp for argument {:?}", arg);
        // Otherwise, create a temporary for the arg
        let arg = Rvalue::Use(arg);

        let ty = arg.ty(&**caller_body, self.tcx);

        let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
        let arg_tmp = caller_body.local_decls.push(arg_tmp);

        let stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(box (Place::from(arg_tmp), arg)),
        };
        caller_body[callsite.bb].statements.push(stmt);
        arg_tmp
    }
}
fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}
/**
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx> {
    block_idx: usize,
    args: &'a [Local],
    local_map: IndexVec<Local, Local>,
    scope_map: IndexVec<SourceScope, SourceScope>,
    destination: Place<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
}
impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(tgt.index() + self.block_idx);
        debug!("updating target `{:?}`, new: `{:?}`", tgt, new);
        new
    }
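    // Callee locals are remapped in three bands: `_0` (the return place) maps
    // to the call destination, `_1 ..= _n` map to the argument locals produced
    // by `make_call_args`, and all later locals go through `local_map`. With
    // two arguments, for instance, callee `_3` maps to the first entry of
    // `local_map`.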
    fn make_integrate_local(&self, local: Local) -> Local {
        if local == RETURN_PLACE {
            return self.destination.local;
        }

        let idx = local.index() - 1;
        if idx < self.args.len() {
            return self.args[idx];
        }

        self.local_map[Local::new(idx - self.args.len())]
    }
}
impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.make_integrate_local(*local);
    }
    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
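        //
        // For example, if the call destination is `(*_4).field` and the callee
        // mentions `_0.x`, the place is rewritten to `(*_4).field.x` before the
        // locals themselves are remapped.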
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base
        // or projections
        self.super_place(place, context, location)
    }
    fn process_projection_elem(&mut self, elem: &PlaceElem<'tcx>) -> Option<PlaceElem<'tcx>> {
        if let PlaceElem::Index(local) = elem {
            let new_local = self.make_integrate_local(*local);

            if new_local != *local {
                return Some(PlaceElem::Index(new_local));
            }
        }

        None
    }
    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }
    fn visit_terminator_kind(&mut self, kind: &mut TerminatorKind<'tcx>, loc: Location) {
        self.super_terminator_kind(kind, loc);

        match *kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.update_target(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets {
                    *tgt = self.update_target(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.update_target(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                *kind = TerminatorKind::Goto { target: self.return_block };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    *kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdges { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.update_target(*real_target);
                *imaginary_target = self.update_target(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
        }
    }
    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.scope_map[*scope];
    }
}