use super::Error;

use itertools::Itertools;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::dominators::{self, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};

use std::ops::{Index, IndexMut};

const ID_SEPARATOR: &str = ",";

/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`'s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
/// set of additional counters, if needed, to count incoming edges when there is more than one.
/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
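///
/// For example, a straight-line sequence of MIR `BasicBlock`s with no intervening branches in or
/// out (such as a `Goto` chain) collapses into a single `BasicCoverageBlock`.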
#[derive(Debug)]
pub(super) struct CoverageGraph {
    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    dominators: Option<Dominators<BasicCoverageBlock>>,
}

impl CoverageGraph {
    pub fn from_mir(mir_body: &mir::Body<'_>) -> Self {
        let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);

        // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
        // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified,
        // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
        // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
        // de-duplication is required. This is done without reordering the successors.

        let bcbs_len = bcbs.len();
        let mut seen = IndexVec::from_elem_n(false, bcbs_len);
        let successors = IndexVec::from_fn_n(
            |bcb| {
                for b in seen.iter_mut() {
                    *b = false;
                }
                let bcb_data = &bcbs[bcb];
                let mut bcb_successors = Vec::new();
                for successor in
                    bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
                        .filter_map(|&successor_bb| bb_to_bcb[successor_bb])
                {
                    if !seen[successor] {
                        seen[successor] = true;
                        bcb_successors.push(successor);
                    }
                }
                bcb_successors
            },
            bcbs.len(),
        );

        let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len());
        for (bcb, bcb_successors) in successors.iter_enumerated() {
            for &successor in bcb_successors {
                predecessors[successor].push(bcb);
            }
        }

        let mut basic_coverage_blocks =
            Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None };
        let dominators = dominators::dominators(&basic_coverage_blocks);
        basic_coverage_blocks.dominators = Some(dominators);
        basic_coverage_blocks
    }

    fn compute_basic_coverage_blocks(
        mir_body: &mir::Body<'_>,
    ) -> (
        IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
        IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    ) {
        let num_basic_blocks = mir_body.num_nodes();
        let mut bcbs = IndexVec::with_capacity(num_basic_blocks);
        let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);

        // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
        // each block terminator's `successors()`. Coverage spans must map to actual source code,
        // so compiler-generated blocks and paths can be ignored. To that end, the CFG traversal
        // intentionally omits unwind paths.
        // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
        // `catch_unwind()` handlers.
        let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);

        let mut basic_blocks = Vec::new();
        for (bb, data) in mir_cfg_without_unwind {
            if let Some(last) = basic_blocks.last() {
                let predecessors = &mir_body.predecessors()[bb];
                if predecessors.len() > 1 || !predecessors.contains(last) {
                    // The `bb` has more than one _incoming_ edge, and should start its own
                    // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
                    // include `bb`; it contains a sequence of one or more sequential basic_blocks
                    // with no intermediate branches in or out. Save these as a new
                    // `BasicCoverageBlockData` before starting the new one.)
                    Self::add_basic_coverage_block(
                        &mut bcbs,
                        &mut bb_to_bcb,
                        basic_blocks.split_off(0),
                    );
                    debug!(
                        "  because {}",
                        if predecessors.len() > 1 {
                            "predecessors.len() > 1".to_owned()
                        } else {
                            format!("bb {} is not in predecessors: {:?}", bb.index(), predecessors)
                        }
                    );
                }
            }
            basic_blocks.push(bb);

            let term = data.terminator();

            match term.kind {
                TerminatorKind::Return { .. }
                | TerminatorKind::Abort
                | TerminatorKind::Yield { .. }
                | TerminatorKind::SwitchInt { .. } => {
                    // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
                    // current sequence of `basic_blocks` gathered to this point, as a new
                    // `BasicCoverageBlockData`.
                    Self::add_basic_coverage_block(
                        &mut bcbs,
                        &mut bb_to_bcb,
                        basic_blocks.split_off(0),
                    );
                    debug!("  because term.kind = {:?}", term.kind);
                    // Note that this condition is based on `TerminatorKind`, even though it
                    // theoretically boils down to `successors().len() != 1`; that is, either zero
                    // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`). But
                    // since the BCB CFG ignores things like unwind branches (which exist in the
                    // `Terminator`'s `successors()` list), checking the number of successors won't
                    // work.
                }

                // The following `TerminatorKind`s are either not expected outside an unwind branch,
                // or they should not (under normal circumstances) branch. Coverage graphs are
                // simplified by ensuring coverage results are accurate for program executions that
                // don't panic.
                //
                // Programs that panic and unwind may record slightly inaccurate coverage results
                // for a coverage region containing the `Terminator` that began the panic. This
                // is as intended. (See Issue #78544 for a possible future option to support
                // coverage in test programs that panic.)
                TerminatorKind::Goto { .. }
                | TerminatorKind::Resume
                | TerminatorKind::Unreachable
                | TerminatorKind::Drop { .. }
                | TerminatorKind::DropAndReplace { .. }
                | TerminatorKind::Call { .. }
                | TerminatorKind::GeneratorDrop
                | TerminatorKind::Assert { .. }
                | TerminatorKind::FalseEdge { .. }
                | TerminatorKind::FalseUnwind { .. }
                | TerminatorKind::InlineAsm { .. } => {}
            }
        }

        if !basic_blocks.is_empty() {
            // process any remaining basic_blocks into a final `BasicCoverageBlockData`
            Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
            debug!("  because the end of the MIR CFG was reached while traversing");
        }

        (bcbs, bb_to_bcb)
    }

    fn add_basic_coverage_block(
        bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
        bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
        basic_blocks: Vec<BasicBlock>,
    ) {
        let bcb = BasicCoverageBlock::from_usize(bcbs.len());
        for &bb in basic_blocks.iter() {
            bb_to_bcb[bb] = Some(bcb);
        }
        let bcb_data = BasicCoverageBlockData::from(basic_blocks);
        debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
        bcbs.push(bcb_data);
    }

    #[inline(always)]
    pub fn iter_enumerated(
        &self,
    ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
        self.bcbs.iter_enumerated()
    }

    #[inline(always)]
    pub fn iter_enumerated_mut(
        &mut self,
    ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
        self.bcbs.iter_enumerated_mut()
    }

    #[inline(always)]
    pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
        if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
    }

    #[inline(always)]
    pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
        self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
    }

    #[inline(always)]
    pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
        self.dominators.as_ref().unwrap()
    }
}

impl Index<BasicCoverageBlock> for CoverageGraph {
    type Output = BasicCoverageBlockData;

    #[inline]
    fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
        &self.bcbs[index]
    }
}

impl IndexMut<BasicCoverageBlock> for CoverageGraph {
    #[inline]
    fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
        &mut self.bcbs[index]
    }
}

impl graph::DirectedGraph for CoverageGraph {
    type Node = BasicCoverageBlock;
}

impl graph::WithNumNodes for CoverageGraph {
    #[inline]
    fn num_nodes(&self) -> usize {
        self.bcbs.len()
    }
}

impl graph::WithStartNode for CoverageGraph {
    #[inline]
    fn start_node(&self) -> Self::Node {
        self.bcb_from_bb(mir::START_BLOCK)
            .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
    }
}

type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;

impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}

impl graph::WithSuccessors for CoverageGraph {
    #[inline]
    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
        self.successors[node].iter().cloned()
    }
}

impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}

impl graph::WithPredecessors for CoverageGraph {
    #[inline]
    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
        self.predecessors[node].iter().copied()
    }
}

rustc_index::newtype_index! {
    /// A node in the control-flow graph of CoverageGraph.
    pub(super) struct BasicCoverageBlock {
        DEBUG_FORMAT = "bcb{}",
        const START_BCB = 0,
    }
}

/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents a maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches; together, the BCBs form a new, simplified, coverage-specific control flow
/// graph, without altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
///   * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code
///     that is injected by the Rust compiler but has no physical source code to count. This also
///     means a BasicBlock with a `Call` terminator can be merged into its primary successor target
///     block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
///     of `#[should_panic]` tests and `catch_unwind()` handlers")
///   * Some BasicBlock terminators support Rust-specific concerns, like borrow-checking, that are
///     not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
///     a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by an `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
    pub basic_blocks: Vec<BasicBlock>,
    pub counter_kind: Option<CoverageKind>,
    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}

impl BasicCoverageBlockData {
    pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
        assert!(!basic_blocks.is_empty());
        Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
    }

    #[inline(always)]
    pub fn leader_bb(&self) -> BasicBlock {
        self.basic_blocks[0]
    }

    #[inline(always)]
    pub fn last_bb(&self) -> BasicBlock {
        *self.basic_blocks.last().unwrap()
    }

    #[inline(always)]
    pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
        &mir_body[self.last_bb()].terminator()
    }

    pub fn set_counter(
        &mut self,
        counter_kind: CoverageKind,
    ) -> Result<ExpressionOperandId, Error> {
        debug_assert!(
            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
            // have an expression (to be injected into an existing `BasicBlock` represented by this
            // `BasicCoverageBlock`).
            self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
            "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
        );
        let operand = counter_kind.as_operand_id();
        if let Some(replaced) = self.counter_kind.replace(counter_kind) {
            Error::from_string(format!(
                "attempt to set a BasicCoverageBlock coverage counter more than once; \
                {:?} already had counter {:?}",
                self, replaced,
            ))
        } else {
            Ok(operand)
        }
    }

    #[inline(always)]
    pub fn counter(&self) -> Option<&CoverageKind> {
        self.counter_kind.as_ref()
    }

    #[inline(always)]
    pub fn take_counter(&mut self) -> Option<CoverageKind> {
        self.counter_kind.take()
    }

    pub fn set_edge_counter_from(
        &mut self,
        from_bcb: BasicCoverageBlock,
        counter_kind: CoverageKind,
    ) -> Result<ExpressionOperandId, Error> {
        if level_enabled!(tracing::Level::DEBUG) {
            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
            // have an expression (to be injected into an existing `BasicBlock` represented by this
            // `BasicCoverageBlock`).
            if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
                return Error::from_string(format!(
                    "attempt to add an incoming edge counter from {:?} when the target BCB already \
                    has a `Counter`",
                    from_bcb
                ));
            }
        }
        let operand = counter_kind.as_operand_id();
        if let Some(replaced) =
            self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
        {
            Error::from_string(format!(
                "attempt to set an edge counter more than once; from_bcb: \
                {:?} already had counter {:?}",
                from_bcb, replaced,
            ))
        } else {
            Ok(operand)
        }
    }

    #[inline]
    pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
        if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
            edge_from_bcbs.get(&from_bcb)
        } else {
            None
        }
    }

    #[inline]
    pub fn take_edge_counters(
        &mut self,
    ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
        self.edge_from_bcbs.take().map(|m| m.into_iter())
    }

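    /// Returns an identifier for this BCB listing the indexes of the `BasicBlock`s it covers,
    /// joined by `ID_SEPARATOR` (e.g. `@1,2,3`).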
    pub fn id(&self) -> String {
        format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR))
    }
}

/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
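///
/// For example, if a target BCB has two incoming edges and one of them comes from a `SwitchInt`
/// in another BCB, that branch is represented as the edge between the two (formatted in debug
/// output as `bcb0->bcb2`) rather than as the target BCB alone.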
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
    pub edge_from_bcb: Option<BasicCoverageBlock>,
    pub target_bcb: BasicCoverageBlock,
}

impl BcbBranch {
    pub fn from_to(
        from_bcb: BasicCoverageBlock,
        to_bcb: BasicCoverageBlock,
        basic_coverage_blocks: &CoverageGraph,
    ) -> Self {
        let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
            Some(from_bcb)
        } else {
            None
        };
        Self { edge_from_bcb, target_bcb: to_bcb }
    }

    pub fn counter<'a>(
        &self,
        basic_coverage_blocks: &'a CoverageGraph,
    ) -> Option<&'a CoverageKind> {
        if let Some(from_bcb) = self.edge_from_bcb {
            basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
        } else {
            basic_coverage_blocks[self.target_bcb].counter()
        }
    }

    pub fn is_only_path_to_target(&self) -> bool {
        self.edge_from_bcb.is_none()
    }
}

impl std::fmt::Debug for BcbBranch {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(from_bcb) = self.edge_from_bcb {
            write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
        } else {
            write!(fmt, "{:?}", self.target_bcb)
        }
    }
}

// Returns the `Terminator`'s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
    body: &'tcx &'a mir::Body<'tcx>,
    term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
    let mut successors = term_kind.successors();
    Box::new(
        match &term_kind {
            // SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { .. } => successors,
            // For all other kinds, return only the first successor, if any, and ignore unwinds.
            // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
            // `next().into_iter()`) into the `mir::Successors` aliased type.
            _ => successors.next().into_iter().chain(&[]),
        }
        .filter(move |&&successor| {
            body[successor].terminator().kind != TerminatorKind::Unreachable
        }),
    )
}

/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// `BasicCoverageBlock`s outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
    /// The loop backedges (and the loop header they return to), if this context is for a loop.
    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,

    /// The worklist of `BasicCoverageBlock`s still to be traversed, in the loop with the given
    /// loop backedges, such that the loop is the innermost loop containing these
    /// `BasicCoverageBlock`s.
    pub worklist: Vec<BasicCoverageBlock>,
}

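/// A worklist-based traversal of the `CoverageGraph` that keeps a stack of `TraversalContext`s,
/// one per loop currently being traversed (plus one for the code outside all loops), so that all
/// of a loop's `BasicCoverageBlock`s are visited before any block that follows the loop.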
pub(super) struct TraverseCoverageGraphWithLoops {
    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    pub context_stack: Vec<TraversalContext>,
    visited: BitSet<BasicCoverageBlock>,
}

impl TraverseCoverageGraphWithLoops {
    pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
        let start_bcb = basic_coverage_blocks.start_node();
        let backedges = find_loop_backedges(basic_coverage_blocks);
        // `context_stack` starts with a `TraversalContext` for the main function context (beginning
        // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
        // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
        // exhausted.
        let context_stack =
            vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
        let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
        Self { backedges, context_stack, visited }
    }

    pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
        debug!(
            "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
            self.context_stack.iter().rev().collect::<Vec<_>>()
        );
        while let Some(next_bcb) = {
            // Strip contexts with empty worklists from the top of the stack
            while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
                self.context_stack.pop();
            }
            // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
            self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
        } {
            if !self.visited.insert(next_bcb) {
                debug!("Already visited: {:?}", next_bcb);
                continue;
            }
            debug!("Visiting {:?}", next_bcb);
            if self.backedges[next_bcb].len() > 0 {
                debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
                self.context_stack.push(TraversalContext {
                    loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
                    worklist: Vec::new(),
                });
            }
            self.extend_worklist(basic_coverage_blocks, next_bcb);
            return Some(next_bcb);
        }
        None
    }

    pub fn extend_worklist(
        &mut self,
        basic_coverage_blocks: &CoverageGraph,
        bcb: BasicCoverageBlock,
    ) {
        let successors = &basic_coverage_blocks.successors[bcb];
        debug!("{:?} has {} successors:", bcb, successors.len());
        for &successor in successors {
            if successor == bcb {
                debug!(
                    "{:?} has itself as its own successor. (Note, the compiled code will \
                    generate an infinite loop.)",
                    bcb
                );
                // Don't re-add this successor to the worklist. We are already processing it.
                break;
            }
            for context in self.context_stack.iter_mut().rev() {
                // Add successors of the current BCB to the appropriate context. Successors that
                // stay within a loop are added to that loop's context worklist. Successors that
                // exit the loop (they are not dominated by the loop header) must be reachable
                // from other BCBs outside the loop, and they will be added to a different
                // worklist.
                //
                // Branching blocks (with more than one successor) must be processed before
                // blocks with only one successor, to prevent unnecessarily complicating
                // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
                // branching block would have given an `Expression` (or vice versa).
                let (some_successor_to_add, some_loop_header) =
                    if let Some((_, loop_header)) = context.loop_backedges {
                        if basic_coverage_blocks.is_dominated_by(successor, loop_header) {
                            (Some(successor), Some(loop_header))
                        } else {
                            (None, None)
                        }
                    } else {
                        (Some(successor), None)
                    };
                if let Some(successor_to_add) = some_successor_to_add {
                    if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
                        debug!(
                            "{:?} successor is branching. Prioritize it at the beginning of \
                            the {}",
                            successor_to_add,
                            if let Some(loop_header) = some_loop_header {
                                format!("worklist for the loop headed by {:?}", loop_header)
                            } else {
                                String::from("non-loop worklist")
                            },
                        );
                        context.worklist.insert(0, successor_to_add);
                    } else {
                        debug!(
                            "{:?} successor is non-branching. Defer it to the end of the {}",
                            successor_to_add,
                            if let Some(loop_header) = some_loop_header {
                                format!("worklist for the loop headed by {:?}", loop_header)
                            } else {
                                String::from("non-loop worklist")
                            },
                        );
                        context.worklist.push(successor_to_add);
                    }
                    break;
                }
            }
        }
    }

    pub fn is_complete(&self) -> bool {
        self.visited.count() == self.visited.domain_size()
    }

    pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
        let mut unvisited_set: BitSet<BasicCoverageBlock> =
            BitSet::new_filled(self.visited.domain_size());
        unvisited_set.subtract(&self.visited);
        unvisited_set.iter().collect::<Vec<_>>()
    }
}

pub(super) fn find_loop_backedges(
    basic_coverage_blocks: &CoverageGraph,
) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
    let num_bcbs = basic_coverage_blocks.num_nodes();
    let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);

    // Identify loops by their backedges.
    //
    // The computational complexity is bounded by `n * s * d`, where `n` is the number of
    // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
    // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
    // independent of the size of the function, so it can be treated as a constant);
    // and `d` is the average number of dominators per node.
    //
    // The average number of dominators depends on the size and complexity of the function, and
    // nodes near the start of the function's control flow graph typically have fewer dominators
    // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
    // think the resulting complexity has the characteristics of O(n log n).
    //
    // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
    // don't expect that this function is creating a performance hot spot, but if this becomes an
    // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
    // existing `FIXME` comment in that code), or possibly ways to optimize its usage here, perhaps
    // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to
    // short-circuit downstream `is_dominated_by` checks.
    //
    // For now, that kind of optimization seems unnecessarily complicated.
    for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
        for &successor in &basic_coverage_blocks.successors[bcb] {
            if basic_coverage_blocks.is_dominated_by(bcb, successor) {
                let loop_header = successor;
                let backedge_from_bcb = bcb;
                debug!(
                    "Found BCB backedge: {:?} -> loop_header: {:?}",
                    backedge_from_bcb, loop_header
                );
                backedges[loop_header].push(backedge_from_bcb);
            }
        }
    }
    backedges
}

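/// A preorder traversal of the MIR CFG, similar to `mir::traversal::Preorder`, except that the
/// successors of each block are taken from the supplied `filtered_successors` function rather
/// than directly from the terminator. The coverage pass uses this with `bcb_filtered_successors`
/// so that unwind edges are not followed while walking the CFG.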
pub struct ShortCircuitPreorder<
    'a,
    'tcx,
    F: Fn(
        &'tcx &'a mir::Body<'tcx>,
        &'tcx TerminatorKind<'tcx>,
    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
> {
    body: &'tcx &'a mir::Body<'tcx>,
    visited: BitSet<BasicBlock>,
    worklist: Vec<BasicBlock>,
    filtered_successors: F,
}

impl<
    'a,
    'tcx,
    F: Fn(
        &'tcx &'a mir::Body<'tcx>,
        &'tcx TerminatorKind<'tcx>,
    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
> ShortCircuitPreorder<'a, 'tcx, F>
{
    pub fn new(
        body: &'tcx &'a mir::Body<'tcx>,
        filtered_successors: F,
    ) -> ShortCircuitPreorder<'a, 'tcx, F> {
        let worklist = vec![mir::START_BLOCK];

        ShortCircuitPreorder {
            body,
            visited: BitSet::new_empty(body.basic_blocks().len()),
            worklist,
            filtered_successors,
        }
    }
}

impl<
    'a: 'tcx,
    'tcx,
    F: Fn(
        &'tcx &'a mir::Body<'tcx>,
        &'tcx TerminatorKind<'tcx>,
    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
> Iterator for ShortCircuitPreorder<'a, 'tcx, F>
{
    type Item = (BasicBlock, &'a BasicBlockData<'tcx>);

    fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
        while let Some(idx) = self.worklist.pop() {
            if !self.visited.insert(idx) {
                continue;
            }

            let data = &self.body[idx];

            if let Some(ref term) = data.terminator {
                self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
            }

            return Some((idx, data));
        }

        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let size = self.body.basic_blocks().len() - self.visited.count();
        (size, Some(size))
    }
}