]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_mir/src/transform/coverage/graph.rs
Rollup merge of #80935 - pierwill:rustc_middle-levelandsource, r=petrochenkov
[rust.git] / compiler / rustc_mir / src / transform / coverage / graph.rs
1 use super::Error;
2
3 use rustc_data_structures::fx::FxHashMap;
4 use rustc_data_structures::graph::dominators::{self, Dominators};
5 use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
6 use rustc_index::bit_set::BitSet;
7 use rustc_index::vec::IndexVec;
8 use rustc_middle::mir::coverage::*;
9 use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
10
11 use std::ops::{Index, IndexMut};
12
/// Separator used by `BasicCoverageBlockData::id()` when joining the indexes of the
/// `BasicBlock`s that make up a BCB (e.g. `@1,2,3`).
const ID_SEPARATOR: &str = ",";
14
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
/// set of additional counters--if needed--to count incoming edges, if there are more than one.
/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
#[derive(Debug)]
pub(super) struct CoverageGraph {
    /// The BCB nodes, indexed by `BasicCoverageBlock`.
    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
    /// Maps each MIR `BasicBlock` to the BCB containing it, or `None` if the block was not
    /// reached by the filtered CFG traversal (e.g. blocks only reachable via unwind paths).
    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    /// BCB-level successor edges, de-duplicated but kept in original MIR successor order.
    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// BCB-level predecessor edges (the inverse of `successors`).
    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// Dominator tree of the BCB graph; `Some` once `from_mir` completes.
    dominators: Option<Dominators<BasicCoverageBlock>>,
}
28
29 impl CoverageGraph {
30     pub fn from_mir(mir_body: &mir::Body<'tcx>) -> Self {
31         let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);
32
33         // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
34         // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the
35         // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
36         // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
37         // de-duplication is required. This is done without reordering the successors.
38
39         let bcbs_len = bcbs.len();
40         let mut seen = IndexVec::from_elem_n(false, bcbs_len);
41         let successors = IndexVec::from_fn_n(
42             |bcb| {
43                 for b in seen.iter_mut() {
44                     *b = false;
45                 }
46                 let bcb_data = &bcbs[bcb];
47                 let mut bcb_successors = Vec::new();
48                 for successor in
49                     bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
50                         .filter_map(|&successor_bb| bb_to_bcb[successor_bb])
51                 {
52                     if !seen[successor] {
53                         seen[successor] = true;
54                         bcb_successors.push(successor);
55                     }
56                 }
57                 bcb_successors
58             },
59             bcbs.len(),
60         );
61
62         let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len());
63         for (bcb, bcb_successors) in successors.iter_enumerated() {
64             for &successor in bcb_successors {
65                 predecessors[successor].push(bcb);
66             }
67         }
68
69         let mut basic_coverage_blocks =
70             Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None };
71         let dominators = dominators::dominators(&basic_coverage_blocks);
72         basic_coverage_blocks.dominators = Some(dominators);
73         basic_coverage_blocks
74     }
75
76     fn compute_basic_coverage_blocks(
77         mir_body: &mir::Body<'tcx>,
78     ) -> (
79         IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
80         IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
81     ) {
82         let num_basic_blocks = mir_body.num_nodes();
83         let mut bcbs = IndexVec::with_capacity(num_basic_blocks);
84         let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
85
86         // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
87         // each block terminator's `successors()`. Coverage spans must map to actual source code,
88         // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
89         // intentionally omits unwind paths.
90         // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
91         // `catch_unwind()` handlers.
92         let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
93
94         let mut basic_blocks = Vec::new();
95         for (bb, data) in mir_cfg_without_unwind {
96             if let Some(last) = basic_blocks.last() {
97                 let predecessors = &mir_body.predecessors()[bb];
98                 if predecessors.len() > 1 || !predecessors.contains(last) {
99                     // The `bb` has more than one _incoming_ edge, and should start its own
100                     // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
101                     // include `bb`; it contains a sequence of one or more sequential basic_blocks
102                     // with no intermediate branches in or out. Save these as a new
103                     // `BasicCoverageBlockData` before starting the new one.)
104                     Self::add_basic_coverage_block(
105                         &mut bcbs,
106                         &mut bb_to_bcb,
107                         basic_blocks.split_off(0),
108                     );
109                     debug!(
110                         "  because {}",
111                         if predecessors.len() > 1 {
112                             "predecessors.len() > 1".to_owned()
113                         } else {
114                             format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
115                         }
116                     );
117                 }
118             }
119             basic_blocks.push(bb);
120
121             let term = data.terminator();
122
123             match term.kind {
124                 TerminatorKind::Return { .. }
125                 | TerminatorKind::Abort
126                 | TerminatorKind::Yield { .. }
127                 | TerminatorKind::SwitchInt { .. } => {
128                     // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
129                     // current sequence of `basic_blocks` gathered to this point, as a new
130                     // `BasicCoverageBlockData`.
131                     Self::add_basic_coverage_block(
132                         &mut bcbs,
133                         &mut bb_to_bcb,
134                         basic_blocks.split_off(0),
135                     );
136                     debug!("  because term.kind = {:?}", term.kind);
137                     // Note that this condition is based on `TerminatorKind`, even though it
138                     // theoretically boils down to `successors().len() != 1`; that is, either zero
139                     // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
140                     // since the BCB CFG ignores things like unwind branches (which exist in the
141                     // `Terminator`s `successors()` list) checking the number of successors won't
142                     // work.
143                 }
144
145                 // The following `TerminatorKind`s are either not expected outside an unwind branch,
146                 // or they should not (under normal circumstances) branch. Coverage graphs are
147                 // simplified by assuring coverage results are accurate for program executions that
148                 // don't panic.
149                 //
150                 // Programs that panic and unwind may record slightly inaccurate coverage results
151                 // for a coverage region containing the `Terminator` that began the panic. This
152                 // is as intended. (See Issue #78544 for a possible future option to support
153                 // coverage in test programs that panic.)
154                 TerminatorKind::Goto { .. }
155                 | TerminatorKind::Resume
156                 | TerminatorKind::Unreachable
157                 | TerminatorKind::Drop { .. }
158                 | TerminatorKind::DropAndReplace { .. }
159                 | TerminatorKind::Call { .. }
160                 | TerminatorKind::GeneratorDrop
161                 | TerminatorKind::Assert { .. }
162                 | TerminatorKind::FalseEdge { .. }
163                 | TerminatorKind::FalseUnwind { .. }
164                 | TerminatorKind::InlineAsm { .. } => {}
165             }
166         }
167
168         if !basic_blocks.is_empty() {
169             // process any remaining basic_blocks into a final `BasicCoverageBlockData`
170             Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
171             debug!("  because the end of the MIR CFG was reached while traversing");
172         }
173
174         (bcbs, bb_to_bcb)
175     }
176
177     fn add_basic_coverage_block(
178         bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
179         bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
180         basic_blocks: Vec<BasicBlock>,
181     ) {
182         let bcb = BasicCoverageBlock::from_usize(bcbs.len());
183         for &bb in basic_blocks.iter() {
184             bb_to_bcb[bb] = Some(bcb);
185         }
186         let bcb_data = BasicCoverageBlockData::from(basic_blocks);
187         debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
188         bcbs.push(bcb_data);
189     }
190
191     #[inline(always)]
192     pub fn iter_enumerated(
193         &self,
194     ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
195         self.bcbs.iter_enumerated()
196     }
197
198     #[inline(always)]
199     pub fn iter_enumerated_mut(
200         &mut self,
201     ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
202         self.bcbs.iter_enumerated_mut()
203     }
204
205     #[inline(always)]
206     pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
207         if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
208     }
209
210     #[inline(always)]
211     pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
212         self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
213     }
214
215     #[inline(always)]
216     pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
217         self.dominators.as_ref().unwrap()
218     }
219 }
220
impl Index<BasicCoverageBlock> for CoverageGraph {
    type Output = BasicCoverageBlockData;

    // Allows `graph[bcb]` to read the BCB's data directly.
    #[inline]
    fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
        &self.bcbs[index]
    }
}

impl IndexMut<BasicCoverageBlock> for CoverageGraph {
    // Allows `graph[bcb]` to mutate the BCB's data directly.
    #[inline]
    fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
        &mut self.bcbs[index]
    }
}
236
// Implementing the `rustc_data_structures::graph` traits lets generic graph algorithms
// (e.g. the dominator computation used in `from_mir`) operate directly on `CoverageGraph`.
impl graph::DirectedGraph for CoverageGraph {
    type Node = BasicCoverageBlock;
}

impl graph::WithNumNodes for CoverageGraph {
    #[inline]
    fn num_nodes(&self) -> usize {
        self.bcbs.len()
    }
}
247
impl graph::WithStartNode for CoverageGraph {
    #[inline]
    fn start_node(&self) -> Self::Node {
        // The BCB containing `START_BLOCK` is the entry node of the coverage graph.
        self.bcb_from_bb(mir::START_BLOCK)
            .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
    }
}
255
/// Borrowing iterator over a BCB's successor list.
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;

impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}

impl graph::WithSuccessors for CoverageGraph {
    #[inline]
    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
        self.successors[node].iter().cloned()
    }
}
269
impl graph::GraphPredecessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::vec::IntoIter<BasicCoverageBlock>;
}

impl graph::WithPredecessors for CoverageGraph {
    #[inline]
    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
        // Clones the predecessor list because `Iter` is declared as an owning iterator.
        self.predecessors[node].clone().into_iter()
    }
}
281
rustc_index::newtype_index! {
    /// A node in the [control-flow graph][CFG] of CoverageGraph.
    ///
    /// `START_BCB` (index 0) is the entry node: the BCB containing `mir::START_BLOCK`.
    pub(super) struct BasicCoverageBlock {
        DEBUG_FORMAT = "bcb{}",
        const START_BCB = 0,
    }
}
289
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
///   * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
///     that is injected by the Rust compiler but has no physical source code to count. This also
///     means a BasicBlock with a `Call` terminator can be merged into its primary successor target
///     block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
///     of `#[should_panic]` tests and `catch_unwind()` handlers")
///   * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
///     not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
///     a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
    /// The MIR `BasicBlock`s merged into this BCB, in traversal order. Always non-empty
    /// (asserted by `from()`).
    pub basic_blocks: Vec<BasicBlock>,
    /// The counter or expression assigned to this BCB, if any (see `set_counter()`).
    pub counter_kind: Option<CoverageKind>,
    /// Counters for specific incoming edges, keyed by the edge's source BCB; allocated
    /// lazily by `set_edge_counter_from()`.
    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
322
323 impl BasicCoverageBlockData {
324     pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
325         assert!(basic_blocks.len() > 0);
326         Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
327     }
328
329     #[inline(always)]
330     pub fn leader_bb(&self) -> BasicBlock {
331         self.basic_blocks[0]
332     }
333
334     #[inline(always)]
335     pub fn last_bb(&self) -> BasicBlock {
336         *self.basic_blocks.last().unwrap()
337     }
338
339     #[inline(always)]
340     pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
341         &mir_body[self.last_bb()].terminator()
342     }
343
344     pub fn set_counter(
345         &mut self,
346         counter_kind: CoverageKind,
347     ) -> Result<ExpressionOperandId, Error> {
348         debug_assert!(
349             // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
350             // have an expression (to be injected into an existing `BasicBlock` represented by this
351             // `BasicCoverageBlock`).
352             self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
353             "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
354         );
355         let operand = counter_kind.as_operand_id();
356         if let Some(replaced) = self.counter_kind.replace(counter_kind) {
357             Error::from_string(format!(
358                 "attempt to set a BasicCoverageBlock coverage counter more than once; \
359                 {:?} already had counter {:?}",
360                 self, replaced,
361             ))
362         } else {
363             Ok(operand)
364         }
365     }
366
367     #[inline(always)]
368     pub fn counter(&self) -> Option<&CoverageKind> {
369         self.counter_kind.as_ref()
370     }
371
372     #[inline(always)]
373     pub fn take_counter(&mut self) -> Option<CoverageKind> {
374         self.counter_kind.take()
375     }
376
377     pub fn set_edge_counter_from(
378         &mut self,
379         from_bcb: BasicCoverageBlock,
380         counter_kind: CoverageKind,
381     ) -> Result<ExpressionOperandId, Error> {
382         if level_enabled!(tracing::Level::DEBUG) {
383             // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
384             // have an expression (to be injected into an existing `BasicBlock` represented by this
385             // `BasicCoverageBlock`).
386             if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
387                 return Error::from_string(format!(
388                     "attempt to add an incoming edge counter from {:?} when the target BCB already \
389                     has a `Counter`",
390                     from_bcb
391                 ));
392             }
393         }
394         let operand = counter_kind.as_operand_id();
395         if let Some(replaced) = self
396             .edge_from_bcbs
397             .get_or_insert_with(FxHashMap::default)
398             .insert(from_bcb, counter_kind)
399         {
400             Error::from_string(format!(
401                 "attempt to set an edge counter more than once; from_bcb: \
402                 {:?} already had counter {:?}",
403                 from_bcb, replaced,
404             ))
405         } else {
406             Ok(operand)
407         }
408     }
409
410     #[inline]
411     pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
412         if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
413             edge_from_bcbs.get(&from_bcb)
414         } else {
415             None
416         }
417     }
418
419     #[inline]
420     pub fn take_edge_counters(
421         &mut self,
422     ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
423         self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
424     }
425
426     pub fn id(&self) -> String {
427         format!(
428             "@{}",
429             self.basic_blocks
430                 .iter()
431                 .map(|bb| bb.index().to_string())
432                 .collect::<Vec<_>>()
433                 .join(ID_SEPARATOR)
434         )
435     }
436 }
437
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
    /// `Some` only when `target_bcb` has multiple incoming edges (see `from_to()`).
    pub edge_from_bcb: Option<BasicCoverageBlock>,
    /// The successor BCB this branch leads to.
    pub target_bcb: BasicCoverageBlock,
}
447
448 impl BcbBranch {
449     pub fn from_to(
450         from_bcb: BasicCoverageBlock,
451         to_bcb: BasicCoverageBlock,
452         basic_coverage_blocks: &CoverageGraph,
453     ) -> Self {
454         let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
455             Some(from_bcb)
456         } else {
457             None
458         };
459         Self { edge_from_bcb, target_bcb: to_bcb }
460     }
461
462     pub fn counter<'a>(
463         &self,
464         basic_coverage_blocks: &'a CoverageGraph,
465     ) -> Option<&'a CoverageKind> {
466         if let Some(from_bcb) = self.edge_from_bcb {
467             basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
468         } else {
469             basic_coverage_blocks[self.target_bcb].counter()
470         }
471     }
472
473     pub fn is_only_path_to_target(&self) -> bool {
474         self.edge_from_bcb.is_none()
475     }
476 }
477
478 impl std::fmt::Debug for BcbBranch {
479     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
480         if let Some(from_bcb) = self.edge_from_bcb {
481             write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
482         } else {
483             write!(fmt, "{:?}", self.target_bcb)
484         }
485     }
486 }
487
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
//
// The double reference (`&'tcx &'a mir::Body<'tcx>`) matches the callback signature expected
// by `ShortCircuitPreorder`.
fn bcb_filtered_successors<'a, 'tcx>(
    body: &'tcx &'a mir::Body<'tcx>,
    term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
    let mut successors = term_kind.successors();
    box match &term_kind {
        // SwitchInt successors are never unwind, and all of them should be traversed.
        TerminatorKind::SwitchInt { .. } => successors,
        // For all other kinds, return only the first successor, if any, and ignore unwinds.
        // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
        // `next().into_iter()`) into the `mir::Successors` aliased type.
        _ => successors.next().into_iter().chain(&[]),
    }
    // Also drop successors that can never execute (their terminator is `Unreachable`), so
    // they never start or join a BCB.
    .filter(move |&&successor| body[successor].terminator().kind != TerminatorKind::Unreachable)
}
506
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
    /// From one or more backedges returning to a loop header.
    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,

    /// Worklist of BCBs to be traversed in the loop with the given loop backedges, such that
    /// the loop is the innermost loop containing these BCBs.
    pub worklist: Vec<BasicCoverageBlock>,
}
520
/// Worklist-driven traversal state over the `CoverageGraph` (see `TraversalContext` for how
/// loops are kept together during the traversal).
pub(super) struct TraverseCoverageGraphWithLoops {
    /// For each BCB, the BCBs that have a backedge to it (non-empty only for loop headers);
    /// computed by `find_loop_backedges`.
    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// Stack of worklists: one context per entered loop, above the bottom non-loop context.
    pub context_stack: Vec<TraversalContext>,
    /// BCBs already returned by `next()`.
    visited: BitSet<BasicCoverageBlock>,
}
526
527 impl TraverseCoverageGraphWithLoops {
528     pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
529         let start_bcb = basic_coverage_blocks.start_node();
530         let backedges = find_loop_backedges(basic_coverage_blocks);
531         let mut context_stack = Vec::new();
532         context_stack.push(TraversalContext { loop_backedges: None, worklist: vec![start_bcb] });
533         // `context_stack` starts with a `TraversalContext` for the main function context (beginning
534         // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
535         // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
536         // exhausted.
537         let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
538         Self { backedges, context_stack, visited }
539     }
540
541     pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
542         debug!(
543             "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
544             self.context_stack.iter().rev().collect::<Vec<_>>()
545         );
546         while let Some(next_bcb) = {
547             // Strip contexts with empty worklists from the top of the stack
548             while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
549                 self.context_stack.pop();
550             }
551             // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
552             self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
553         } {
554             if !self.visited.insert(next_bcb) {
555                 debug!("Already visited: {:?}", next_bcb);
556                 continue;
557             }
558             debug!("Visiting {:?}", next_bcb);
559             if self.backedges[next_bcb].len() > 0 {
560                 debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
561                 self.context_stack.push(TraversalContext {
562                     loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
563                     worklist: Vec::new(),
564                 });
565             }
566             self.extend_worklist(basic_coverage_blocks, next_bcb);
567             return Some(next_bcb);
568         }
569         None
570     }
571
572     pub fn extend_worklist(
573         &mut self,
574         basic_coverage_blocks: &CoverageGraph,
575         bcb: BasicCoverageBlock,
576     ) {
577         let successors = &basic_coverage_blocks.successors[bcb];
578         debug!("{:?} has {} successors:", bcb, successors.len());
579         for &successor in successors {
580             if successor == bcb {
581                 debug!(
582                     "{:?} has itself as its own successor. (Note, the compiled code will \
583                     generate an infinite loop.)",
584                     bcb
585                 );
586                 // Don't re-add this successor to the worklist. We are already processing it.
587                 break;
588             }
589             for context in self.context_stack.iter_mut().rev() {
590                 // Add successors of the current BCB to the appropriate context. Successors that
591                 // stay within a loop are added to the BCBs context worklist. Successors that
592                 // exit the loop (they are not dominated by the loop header) must be reachable
593                 // from other BCBs outside the loop, and they will be added to a different
594                 // worklist.
595                 //
596                 // Branching blocks (with more than one successor) must be processed before
597                 // blocks with only one successor, to prevent unnecessarily complicating
598                 // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
599                 // branching block would have given an `Expression` (or vice versa).
600                 let (some_successor_to_add, some_loop_header) =
601                     if let Some((_, loop_header)) = context.loop_backedges {
602                         if basic_coverage_blocks.is_dominated_by(successor, loop_header) {
603                             (Some(successor), Some(loop_header))
604                         } else {
605                             (None, None)
606                         }
607                     } else {
608                         (Some(successor), None)
609                     };
610                 if let Some(successor_to_add) = some_successor_to_add {
611                     if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
612                         debug!(
613                             "{:?} successor is branching. Prioritize it at the beginning of \
614                             the {}",
615                             successor_to_add,
616                             if let Some(loop_header) = some_loop_header {
617                                 format!("worklist for the loop headed by {:?}", loop_header)
618                             } else {
619                                 String::from("non-loop worklist")
620                             },
621                         );
622                         context.worklist.insert(0, successor_to_add);
623                     } else {
624                         debug!(
625                             "{:?} successor is non-branching. Defer it to the end of the {}",
626                             successor_to_add,
627                             if let Some(loop_header) = some_loop_header {
628                                 format!("worklist for the loop headed by {:?}", loop_header)
629                             } else {
630                                 String::from("non-loop worklist")
631                             },
632                         );
633                         context.worklist.push(successor_to_add);
634                     }
635                     break;
636                 }
637             }
638         }
639     }
640
641     pub fn is_complete(&self) -> bool {
642         self.visited.count() == self.visited.domain_size()
643     }
644
645     pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
646         let mut unvisited_set: BitSet<BasicCoverageBlock> =
647             BitSet::new_filled(self.visited.domain_size());
648         unvisited_set.subtract(&self.visited);
649         unvisited_set.iter().collect::<Vec<_>>()
650     }
651 }
652
653 pub(super) fn find_loop_backedges(
654     basic_coverage_blocks: &CoverageGraph,
655 ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
656     let num_bcbs = basic_coverage_blocks.num_nodes();
657     let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);
658
659     // Identify loops by their backedges.
660     //
661     // The computational complexity is bounded by: n(s) x d where `n` is the number of
662     // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
663     // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
664     // independent of the size of the function, so it can be treated as a constant);
665     // and `d` is the average number of dominators per node.
666     //
667     // The average number of dominators depends on the size and complexity of the function, and
668     // nodes near the start of the function's control flow graph typically have less dominators
669     // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
670     // think the resulting complexity has the characteristics of O(n log n).
671     //
672     // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
673     // don't expect that this function is creating a performance hot spot, but if this becomes an
674     // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
675     // existing `FIXME` comment in that code), or possibly ways to optimize it's usage here, perhaps
676     // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
677     // circuit downstream `is_dominated_by` checks.
678     //
679     // For now, that kind of optimization seems unnecessarily complicated.
680     for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
681         for &successor in &basic_coverage_blocks.successors[bcb] {
682             if basic_coverage_blocks.is_dominated_by(bcb, successor) {
683                 let loop_header = successor;
684                 let backedge_from_bcb = bcb;
685                 debug!(
686                     "Found BCB backedge: {:?} -> loop_header: {:?}",
687                     backedge_from_bcb, loop_header
688                 );
689                 backedges[loop_header].push(backedge_from_bcb);
690             }
691         }
692     }
693     backedges
694 }
695
/// A preorder traversal over the MIR CFG that uses a caller-provided callback to decide which
/// successors of each block to follow, "short-circuiting" paths the caller wants to ignore
/// (e.g. unwind edges, via `bcb_filtered_successors`).
pub struct ShortCircuitPreorder<
    'a,
    'tcx,
    F: Fn(
        &'tcx &'a mir::Body<'tcx>,
        &'tcx TerminatorKind<'tcx>,
    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
> {
    // The MIR body being traversed (double reference to match the callback signature).
    body: &'tcx &'a mir::Body<'tcx>,
    // Blocks already yielded by the iterator.
    visited: BitSet<BasicBlock>,
    // Blocks discovered but not yet yielded.
    worklist: Vec<BasicBlock>,
    // Callback selecting which successors of a terminator to follow.
    filtered_successors: F,
}
709
710 impl<
711     'a,
712     'tcx,
713     F: Fn(
714         &'tcx &'a mir::Body<'tcx>,
715         &'tcx TerminatorKind<'tcx>,
716     ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
717 > ShortCircuitPreorder<'a, 'tcx, F>
718 {
719     pub fn new(
720         body: &'tcx &'a mir::Body<'tcx>,
721         filtered_successors: F,
722     ) -> ShortCircuitPreorder<'a, 'tcx, F> {
723         let worklist = vec![mir::START_BLOCK];
724
725         ShortCircuitPreorder {
726             body,
727             visited: BitSet::new_empty(body.basic_blocks().len()),
728             worklist,
729             filtered_successors,
730         }
731     }
732 }
733
734 impl<
735     'a: 'tcx,
736     'tcx,
737     F: Fn(
738         &'tcx &'a mir::Body<'tcx>,
739         &'tcx TerminatorKind<'tcx>,
740     ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
741 > Iterator for ShortCircuitPreorder<'a, 'tcx, F>
742 {
743     type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
744
745     fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
746         while let Some(idx) = self.worklist.pop() {
747             if !self.visited.insert(idx) {
748                 continue;
749             }
750
751             let data = &self.body[idx];
752
753             if let Some(ref term) = data.terminator {
754                 self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
755             }
756
757             return Some((idx, data));
758         }
759
760         None
761     }
762
763     fn size_hint(&self) -> (usize, Option<usize>) {
764         let size = self.body.basic_blocks().len() - self.visited.count();
765         (size, Some(size))
766     }
767 }