1 use super::debug::term_type;
2 use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
4 use rustc_data_structures::graph::WithNumNodes;
5 use rustc_middle::mir::spanview::source_range_no_file;
6 use rustc_middle::mir::{
7 self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
10 use rustc_middle::ty::TyCtxt;
11 use rustc_span::source_map::original_sp;
12 use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
14 use std::cell::RefCell;
15 use std::cmp::Ordering;
// NOTE(review): this extract is missing interior lines (the embedded original
// line numbers are non-contiguous); all code lines below are kept byte-identical.
/// Associates a coverage-relevant `Span` with the MIR element it was derived
/// from: either a `Statement` (basic block, span, statement index) or a block's
/// `Terminator` (basic block, span).
17 #[derive(Debug, Copy, Clone)]
18 pub(super) enum CoverageStatement {
19 Statement(BasicBlock, Span, usize),
20 Terminator(BasicBlock, Span),
23 impl CoverageStatement {
// Renders a human-readable debug description: the source range (via
// `source_range_no_file`) plus the statement or terminator kind.
// NOTE(review): the surrounding `match` and format string are on lines not
// visible in this extract.
24 pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
26 Self::Statement(bb, span, stmt_index) => {
27 let stmt = &mir_body[bb].statements[stmt_index];
30 source_range_no_file(tcx, &span),
36 Self::Terminator(bb, span) => {
37 let term = mir_body[bb].terminator();
40 source_range_no_file(tcx, &span),
42 term_type(&term.kind),
// Returns the `Span` carried by either variant.
49 pub fn span(&self) -> &Span {
51 Self::Statement(_, span, _) | Self::Terminator(_, span) => span,
56 /// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
57 /// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
58 /// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
59 /// transforms can combine adjacent `Span`s and `CoverageSpan` from the same BCB, merging the
60 /// `CoverageStatement` vectors, and the `Span`s to cover the extent of the combined `Span`s.
62 /// Note: A `CoverageStatement` merged into another CoverageSpan may come from a `BasicBlock` that
63 /// is not part of the `CoverageSpan` bcb if the statement was included because it's `Span` matches
64 /// or is subsumed by the `Span` associated with this `CoverageSpan`, and it's `BasicBlock`
65 /// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`.
66 #[derive(Debug, Clone)]
67 pub(super) struct CoverageSpan {
// NOTE(review): fields such as `span`, `expn_span` and `is_closure` (referenced
// by methods below) are declared on lines not visible in this extract.
// Memoized result of `current_macro()`; the outer `Option` means "not yet
// computed". `RefCell` allows `&self` methods to fill it in lazily.
70 pub current_macro_or_none: RefCell<Option<Option<Symbol>>>,
// The `BasicCoverageBlock` this span was derived from.
71 pub bcb: BasicCoverageBlock,
// The MIR statements/terminators whose spans contributed to this span's extent.
72 pub coverage_statements: Vec<CoverageStatement>,
// Constructs the special `CoverageSpan` for the function signature; it has no
// contributing MIR statements (`coverage_statements` is empty).
// NOTE(review): remaining field initializers are on lines not visible here.
77 pub fn for_fn_sig(fn_sig_span: Span) -> Self {
80 expn_span: fn_sig_span,
81 current_macro_or_none: Default::default(),
83 coverage_statements: vec![],
// Constructs a `CoverageSpan` for a single MIR `Statement`. An assignment of a
// closure or generator aggregate sets `is_closure` (see the `matches!` below),
// so later passes can carve its span out of the enclosing function's coverage.
89 statement: &Statement<'_>,
92 bcb: BasicCoverageBlock,
96 let is_closure = match statement.kind {
97 StatementKind::Assign(box (_, Rvalue::Aggregate(box ref kind, _))) => {
98 matches!(kind, AggregateKind::Closure(_, _) | AggregateKind::Generator(_, _, _))
106 current_macro_or_none: Default::default(),
108 coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)],
// Constructs a `CoverageSpan` for a block `Terminator`.
113 pub fn for_terminator(
116 bcb: BasicCoverageBlock,
122 current_macro_or_none: Default::default(),
124 coverage_statements: vec![CoverageStatement::Terminator(bb, span)],
// Merges `other` into `self`: widens `self.span` to cover both spans and
// appends `other`'s contributing statements. The caller must guarantee
// mergeability (same BCB, neither a closure) — enforced by the `debug_assert!`.
129 pub fn merge_from(&mut self, mut other: CoverageSpan) {
130 debug_assert!(self.is_mergeable(&other));
131 self.span = self.span.to(other.span);
132 self.coverage_statements.append(&mut other.coverage_statements);
// Drops contributing statements whose spans end after `cutoff_pos`, then shrinks
// `self.span` so its end matches the highest remaining statement-span end.
135 pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
136 self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos);
137 if let Some(highest_covstmt) =
138 self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi())
140 self.span = self.span.with_hi(highest_covstmt.span().hi());
// Two spans can merge only when they share a BCB and neither is a closure span.
145 pub fn is_mergeable(&self, other: &Self) -> bool {
146 self.is_in_same_bcb(other) && !(self.is_closure || other.is_closure)
// True when both spans originate from the same `BasicCoverageBlock`.
150 pub fn is_in_same_bcb(&self, other: &Self) -> bool {
151 self.bcb == other.bcb
// Debug helper: renders this span's source range followed by its contributing
// statements, re-indented one level (the `replace` indents embedded newlines).
154 pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
157 source_range_no_file(tcx, &self.span),
158 self.format_coverage_statements(tcx, mir_body).replace('\n', "\n "),
// Formats the contributing statements in deterministic (block, index) order.
// Terminators sort after all statements of their block via the `usize::MAX` key.
162 pub fn format_coverage_statements<'tcx>(
165 mir_body: &mir::Body<'tcx>,
167 let mut sorted_coverage_statements = self.coverage_statements.clone();
168 sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt {
169 CoverageStatement::Statement(bb, _, index) => (bb, index),
170 CoverageStatement::Terminator(bb, _) => (bb, usize::MAX),
172 sorted_coverage_statements
174 .map(|covstmt| covstmt.format(tcx, mir_body))
179 /// If the span is part of a macro, returns the macro name symbol.
180 pub fn current_macro(&self) -> Option<Symbol> {
181 self.current_macro_or_none
// Computed once and memoized in the `RefCell` on first call.
183 .get_or_insert_with(|| {
// Only bang-macro (`mac!()`) expansions count; other expansion kinds fall
// through to the `None` path (on a line not visible in this extract).
184 if let ExpnKind::Macro(MacroKind::Bang, current_macro) =
185 self.expn_span.ctxt().outer_expn_data().kind
187 return Some(current_macro);
191 .map(|symbol| symbol)
194 /// If the span is part of a macro, and the macro is visible (expands directly to the given
195 /// body_span), returns the macro name symbol.
196 pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
197 if let Some(current_macro) = self.current_macro() {
// NOTE(review): the visibility condition comparing the expansion's parent
// callsite against `body_span` is on lines not visible in this extract.
201 .unwrap_or_else(|| bug!("macro must have a parent"))
205 return Some(current_macro);
// True if this span originated from any macro expansion.
211 pub fn is_macro_expansion(&self) -> bool {
212 self.current_macro().is_some()
216 /// Converts the initial set of `CoverageSpan`s (one per MIR `Statement` or `Terminator`) into a
217 /// minimal set of `CoverageSpan`s, using the BCB CFG to determine where it is safe and useful to:
219 /// * Remove duplicate source code coverage regions
220 /// * Merge spans that represent continuous (both in source code and control flow), non-branching
222 /// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
// NOTE(review): several field declarations (e.g. `fn_sig_span`, `body_span`)
// are missing from this extract — only their doc comments remain below.
223 pub struct CoverageSpans<'a, 'tcx> {
224 /// The MIR, used to look up `BasicBlockData`.
225 mir_body: &'a mir::Body<'tcx>,
227 /// A `Span` covering the signature of function for the MIR.
230 /// A `Span` covering the function body of the MIR (typically from left curly brace to right
234 /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
235 basic_coverage_blocks: &'a CoverageGraph,
237 /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
238 /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
239 sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,
241 /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
242 /// discard of the `prev` (and or `pending_dups`), or keep both (with `prev` moved to
243 /// `pending_dups`). If `curr` is not discarded or merged, it becomes `prev` for the next
245 some_curr: Option<CoverageSpan>,
247 /// The original `span` for `curr`, in case `curr.span()` is modified. The `curr_original_span`
248 /// **must not be mutated** (except when advancing to the next `curr`), even if `curr.span()`
250 curr_original_span: Span,
252 /// The CoverageSpan from a prior iteration; typically assigned from that iteration's `curr`.
253 /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
254 some_prev: Option<CoverageSpan>,
256 /// Assigned from `curr_original_span` from the previous iteration. The `prev_original_span`
257 /// **must not be mutated** (except when advancing to the next `prev`), even if `prev.span()`
259 prev_original_span: Span,
261 /// A copy of the expn_span from the prior iteration.
// Used by `check_invoked_macro_name_span` to detect entry into a new expansion.
262 prev_expn_span: Option<Span>,
264 /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
265 /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
266 /// If a new `curr` span also fits this criteria (compared to an existing list of
267 /// `pending_dups`), that `curr` `CoverageSpan` moves to `prev` before possibly being added to
268 /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
269 /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
270 /// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
271 /// `prev` with a matching `Span`)
272 pending_dups: Vec<CoverageSpan>,
274 /// The final `CoverageSpan`s to add to the coverage map. A `Counter` or `Expression`
275 /// will also be injected into the MIR for each `CoverageSpan`.
276 refined_spans: Vec<CoverageSpan>,
279 impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
280 /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
283 /// The basic steps are:
285 /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
286 /// `BasicCoverageBlockData`.
287 /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
288 /// are sorted with longer spans before shorter spans; and equal spans are sorted
289 /// (deterministically) based on "dominator" relationship (if any).
290 /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
291 /// if another span or spans are already counting the same code region), or should be merged
292 /// into a broader combined span (because it represents a contiguous, non-branching, and
293 /// uninterrupted region of source code).
295 /// Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
296 /// closures have their own MIR, their `Span` in their enclosing function should be left
299 /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
301 pub(super) fn generate_coverage_spans(
302 mir_body: &'a mir::Body<'tcx>,
303 fn_sig_span: Span, // Ensured to be same SourceFile and SyntaxContext as `body_span`
305 basic_coverage_blocks: &'a CoverageGraph,
306 ) -> Vec<CoverageSpan> {
// NOTE(review): several field initializers (e.g. mir_body, fn_sig_span,
// body_span, some_curr, some_prev) are on lines missing from this extract.
307 let mut coverage_spans = CoverageSpans {
311 basic_coverage_blocks,
312 sorted_spans_iter: None,
// Pre-size for roughly two spans per BCB to avoid repeated reallocation.
313 refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
// Placeholder spans; overwritten as iteration advances.
315 curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
317 prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
318 prev_expn_span: None,
319 pending_dups: Vec::new(),
// Extract + sort the initial spans, then refine them into the final set.
322 let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();
324 coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
326 coverage_spans.to_refined_spans()
// Builds the initial, sorted `CoverageSpan` list: one span per filtered
// statement/terminator of every BCB, plus the function-signature span.
329 fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
330 let mut initial_spans = Vec::<CoverageSpan>::with_capacity(self.mir_body.num_nodes() * 2);
331 for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
332 initial_spans.extend(self.bcb_to_initial_coverage_spans(bcb, bcb_data));
335 if initial_spans.is_empty() {
336 // This can happen if, for example, the function is unreachable (contains only a
337 // `BasicBlock`(s) with an `Unreachable` terminator).
338 return initial_spans;
341 initial_spans.push(CoverageSpan::for_fn_sig(self.fn_sig_span));
// Sort by lo(), then by hi() descending, then by reverse dominator order.
// NOTE(review): the comparator branches below yield `Option<Ordering>`
// (`partial_cmp`/`rank_partial_cmp`); the unwrap presumably happens on a line
// not visible in this extract — confirm against the full source.
343 initial_spans.sort_unstable_by(|a, b| {
344 if a.span.lo() == b.span.lo() {
345 if a.span.hi() == b.span.hi() {
346 if a.is_in_same_bcb(b) {
347 Some(Ordering::Equal)
349 // Sort equal spans by dominator relationship, in reverse order (so
350 // dominators always come after the dominated equal spans). When later
351 // comparing two spans in order, the first will either dominate the second,
352 // or they will have no dominator relationship.
353 self.basic_coverage_blocks.dominators().rank_partial_cmp(b.bcb, a.bcb)
356 // Sort hi() in reverse order so shorter spans are attempted after longer spans.
357 // This guarantees that, if a `prev` span overlaps, and is not equal to, a
358 // `curr` span, the prev span either extends further left of the curr span, or
359 // they start at the same position and the prev span extends further right of
360 // the end of the curr span.
361 b.span.hi().partial_cmp(&a.span.hi())
364 a.span.lo().partial_cmp(&b.span.lo())
372 /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
373 /// de-duplicated `CoverageSpan`s.
374 fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
// Main refinement loop: compare each `curr` against `prev` and decide to
// merge, drop, carve out, hold as a duplicate, or emit a refined span.
375 while self.next_coverage_span() {
376 if self.some_prev.is_none() {
377 debug!(" initial span");
378 self.check_invoked_macro_name_span();
379 } else if self.curr().is_mergeable(self.prev()) {
380 debug!(" same bcb (and neither is a closure), merge with prev={:?}", self.prev());
381 let prev = self.take_prev();
382 self.curr_mut().merge_from(prev);
383 self.check_invoked_macro_name_span();
384 // Note that curr.span may now differ from curr_original_span
385 } else if self.prev_ends_before_curr() {
387 " different bcbs and disjoint spans, so keep curr for next iter, and add \
// `prev` is finished: no later span can overlap it, so emit it now.
391 let prev = self.take_prev();
392 self.push_refined_span(prev);
393 self.check_invoked_macro_name_span();
394 } else if self.prev().is_closure {
395 // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
398 " curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
403 } else if self.curr().is_closure {
404 self.carve_out_span_for_closure();
405 } else if self.prev_original_span == self.curr().span {
406 // Note that this compares the new (`curr`) span to `prev_original_span`.
407 // In this branch, the actual span byte range of `prev_original_span` is not
408 // important. What is important is knowing whether the new `curr` span was
409 // **originally** the same as the original span of `prev()`. The original spans
410 // reflect their original sort order, and for equal spans, conveys a partial
411 // ordering based on CFG dominator priority.
412 if self.prev().is_macro_expansion() && self.curr().is_macro_expansion() {
413 // Macros that expand to include branching (such as
414 // `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
415 // `trace!()) typically generate callee spans with identical
416 // ranges (typically the full span of the macro) for all
417 // `BasicBlocks`. This makes it impossible to distinguish
418 // the condition (`if val1 != val2`) from the optional
419 // branched statements (such as the call to `panic!()` on
420 // assert failure). In this case it is better (or less
421 // worse) to drop the optional branch bcbs and keep the
422 // non-conditional statements, to count when reached.
424 " curr and prev are part of a macro expansion, and curr has the same span \
425 as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
431 self.hold_pending_dups_unless_dominated();
// Fallthrough: overlapping, non-equal spans in different BCBs.
434 self.cutoff_prev_at_overlapping_curr();
435 self.check_invoked_macro_name_span();
// After the loop: flush the final `prev` and any still-pending duplicates.
439 debug!(" AT END, adding last prev={:?}", self.prev());
440 let prev = self.take_prev();
441 let pending_dups = self.pending_dups.split_off(0);
442 for dup in pending_dups {
443 debug!(" ...adding at least one pending dup={:?}", dup);
444 self.push_refined_span(dup);
447 // Async functions wrap a closure that implements the body to be executed. The enclosing
448 // function is called and returns an `impl Future` without initially executing any of the
449 // body. To avoid showing the return from the enclosing function as a "covered" return from
450 // the closure, the enclosing function's `TerminatorKind::Return`s `CoverageSpan` is
451 // excluded. The closure's `Return` is the only one that will be counted. This provides
452 // adequate coverage, and more intuitive counts. (Avoids double-counting the closing brace
453 // of the function body.)
454 let body_ends_with_closure = if let Some(last_covspan) = self.refined_spans.last() {
455 last_covspan.is_closure && last_covspan.span.hi() == self.body_span.hi()
460 if !body_ends_with_closure {
461 self.push_refined_span(prev);
464 // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
465 // regions for the current function leave room for the closure's own coverage regions
466 // (injected separately, from the closure's own MIR).
467 self.refined_spans.retain(|covspan| !covspan.is_closure);
// Appends `covspan` to `refined_spans`, first trying to merge it into the most
// recently pushed span when the two are mergeable (same BCB, no closures).
471 fn push_refined_span(&mut self, covspan: CoverageSpan) {
472 let len = self.refined_spans.len();
474 let last = &mut self.refined_spans[len - 1];
475 if last.is_mergeable(&covspan) {
477 "merging new refined span with last refined span, last={:?}, covspan={:?}",
480 last.merge_from(covspan);
484 self.refined_spans.push(covspan)
// When `curr` starts a new visible macro expansion (different SyntaxContext
// from the previous span), split off a dedicated span covering just the
// `macro_name!` invocation text and push it; `curr` is shrunk to start after it.
487 fn check_invoked_macro_name_span(&mut self) {
488 if let Some(visible_macro) = self.curr().visible_macro(self.body_span) {
489 if self.prev_expn_span.map_or(true, |prev_expn_span| {
490 self.curr().expn_span.ctxt() != prev_expn_span.ctxt()
// Offset past any prefix already merged into curr, plus "name" + "!" (the
// `+ 1` accounts for the bang).
492 let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
493 let after_macro_bang =
494 merged_prefix_len + BytePos(visible_macro.as_str().bytes().count() as u32 + 1);
495 let mut macro_name_cov = self.curr().clone();
496 self.curr_mut().span =
497 self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
498 macro_name_cov.span =
499 macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
501 " and curr starts a new macro expansion, so add a new span just for \
502 the macro `{}!`, new span={:?}",
503 visible_macro, macro_name_cov
505 self.push_refined_span(macro_name_cov);
510 // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
511 // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
512 // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
513 // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
514 // `Statement`s and/or `Terminator`s.)
515 fn bcb_to_initial_coverage_spans(
517 bcb: BasicCoverageBlock,
518 bcb_data: &'a BasicCoverageBlockData,
519 ) -> Vec<CoverageSpan> {
// NOTE(review): the iteration over `bcb_data`'s basic blocks and the
// enclosing flat_map/collect are on lines missing from this extract.
524 let data = &self.mir_body[bb];
// Keep only statements whose spans contribute to coverage (see
// `filtered_statement_span`), remapped into the function's body source.
528 .filter_map(move |(index, statement)| {
529 filtered_statement_span(statement).map(|span| {
530 CoverageSpan::for_statement(
532 function_source_span(span, self.body_span),
// Then append a span for the block's terminator, if it contributes.
540 .chain(filtered_terminator_span(data.terminator()).map(|span| {
541 CoverageSpan::for_terminator(
542 function_source_span(span, self.body_span),
// Accessors for the current/previous spans; `bug!` documents the invariant
// that they are only called while the corresponding Option is Some.
552 fn curr(&self) -> &CoverageSpan {
555 .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
558 fn curr_mut(&mut self) -> &mut CoverageSpan {
561 .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
564 fn prev(&self) -> &CoverageSpan {
567 .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
570 fn prev_mut(&mut self) -> &mut CoverageSpan {
573 .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
// Takes ownership of `prev`, leaving `some_prev` as None.
576 fn take_prev(&mut self) -> CoverageSpan {
577 self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
580 /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
581 /// `pending_dups` spans), then one of the following two things happened during the previous
583 /// * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
584 /// (in which case there should be at least two spans in `pending_dups`); or
585 /// * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
586 /// `pending_dups` could have as few as one span)
587 /// In either case, no more spans will match the span of `pending_dups`, so
588 /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
589 fn check_pending_dups(&mut self) {
590 if let Some(dup) = self.pending_dups.last() {
591 if dup.span != self.prev().span {
593 " SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
594 previous iteration, or prev started a new disjoint span"
// Dups entirely left of `curr` cannot overlap it: flush them to refined_spans.
596 if dup.span.hi() <= self.curr().span.lo() {
597 let pending_dups = self.pending_dups.split_off(0);
598 for dup in pending_dups.into_iter() {
599 debug!(" ...adding at least one pending={:?}", dup);
600 self.push_refined_span(dup);
// Otherwise the dups overlap `curr`; discard them.
603 self.pending_dups.clear();
609 /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
610 fn next_coverage_span(&mut self) -> bool {
// Rotate: the old `curr` (and its original span / expn_span) becomes `prev`.
611 if let Some(curr) = self.some_curr.take() {
612 self.prev_expn_span = Some(curr.expn_span);
613 self.some_prev = Some(curr);
614 self.prev_original_span = self.curr_original_span;
616 while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
617 debug!("FOR curr={:?}", curr);
618 if self.some_prev.is_some() && self.prev_starts_after_next(&curr) {
620 " prev.span starts after curr.span, so curr will be dropped (skipping past \
621 closure?); prev={:?}",
625 // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
626 // by `self.curr_mut().merge_from(prev)`.
627 self.curr_original_span = curr.span;
628 self.some_curr.replace(curr);
629 self.check_pending_dups();
// NOTE(review): the `return true` / `false` tail of this loop is on lines
// missing from this extract.
636 /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
637 /// `curr` coverage span.
638 fn take_curr(&mut self) -> CoverageSpan {
639 self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
642 /// Returns true if the curr span should be skipped because prev has already advanced beyond the
643 /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
644 /// of code, such as skipping past a closure.
645 fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
646 self.prev().span.lo() > next_curr.span.lo()
649 /// Returns true if the curr span starts past the end of the prev span, which means they don't
650 /// overlap, so we now know the prev can be added to the refined coverage spans.
// Note: `<=` treats spans that merely touch (prev.hi == curr.lo) as disjoint.
651 fn prev_ends_before_curr(&self) -> bool {
652 self.prev().span.hi() <= self.curr().span.lo()
655 /// If `prev`s span extends left of the closure (`curr`), carve out the closure's span from
656 /// `prev`'s span. (The closure's coverage counters will be injected when processing the
657 /// closure's own MIR.) Add the portion of the span to the left of the closure; and if the span
658 /// extends to the right of the closure, update `prev` to that portion of the span. For any
659 /// `pending_dups`, repeat the same process.
660 fn carve_out_span_for_closure(&mut self) {
661 let curr_span = self.curr().span;
662 let left_cutoff = curr_span.lo();
663 let right_cutoff = curr_span.hi();
// NOTE(review): `has_pre_closure_span` compares prev.lo() against
// `right_cutoff` here (not `left_cutoff`) — looks surprising; confirm against
// the full upstream source before relying on this detail.
664 let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
665 let has_post_closure_span = self.prev().span.hi() > right_cutoff;
// Temporarily take ownership of pending_dups so they can be split the same way.
666 let mut pending_dups = self.pending_dups.split_off(0);
667 if has_pre_closure_span {
// Emit the portion of `prev` (and each dup) left of the closure.
668 let mut pre_closure = self.prev().clone();
669 pre_closure.span = pre_closure.span.with_hi(left_cutoff);
670 debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
671 if !pending_dups.is_empty() {
672 for mut dup in pending_dups.iter().cloned() {
673 dup.span = dup.span.with_hi(left_cutoff);
674 debug!(" ...and at least one pre_closure dup={:?}", dup);
675 self.push_refined_span(dup);
678 self.push_refined_span(pre_closure);
680 if has_post_closure_span {
681 // Mutate `prev.span()` to start after the closure (and discard curr).
682 // (**NEVER** update `prev_original_span` because it affects the assumptions
683 // about how the `CoverageSpan`s are ordered.)
684 self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
685 debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev());
686 for dup in pending_dups.iter_mut() {
687 debug!(" ...and at least one overlapping dup={:?}", dup);
688 dup.span = dup.span.with_lo(right_cutoff);
// Restore the (now-trimmed) dups, and emit the closure's own span.
690 self.pending_dups.append(&mut pending_dups);
691 let closure_covspan = self.take_curr();
692 self.push_refined_span(closure_covspan); // since self.prev() was already updated
// No post-closure portion: the trimmed dups are dropped entirely.
694 pending_dups.clear();
698 /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
699 /// `pending_dups` spans, if any). Keep in mind, `prev.span()` may have been changed.
700 /// If prev.span() was merged into other spans (with matching BCB, for instance),
701 /// `prev.span.hi()` will be greater than (further right of) `prev_original_span.hi()`.
702 /// If prev.span() was split off to the right of a closure, prev.span().lo() will be
703 /// greater than prev_original_span.lo(). The actual span of `prev_original_span` is
704 /// not as important as knowing that `prev()` **used to have the same span** as `curr(),
705 /// which means their sort order is still meaningful for determinating the dominator
708 /// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
709 /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
710 /// until their disposition is determined. In this latter case, the `prev` dup is moved into
711 /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
712 fn hold_pending_dups_unless_dominated(&mut self) {
713 // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
714 // impossible for `curr` to dominate any previous `CoverageSpan`.
715 debug_assert!(!self.span_bcb_is_dominated_by(self.prev(), self.curr()));
// First, drop any pending dups that dominate `curr` (they would double-count).
717 let initial_pending_count = self.pending_dups.len();
718 if initial_pending_count > 0 {
// split_off/append dance avoids borrowing `self` while filtering.
719 let mut pending_dups = self.pending_dups.split_off(0);
720 pending_dups.retain(|dup| !self.span_bcb_is_dominated_by(self.curr(), dup));
721 self.pending_dups.append(&mut pending_dups);
722 if self.pending_dups.len() < initial_pending_count {
724 " discarded {} of {} pending_dups that dominated curr",
725 initial_pending_count - self.pending_dups.len(),
726 initial_pending_count
731 if self.span_bcb_is_dominated_by(self.curr(), self.prev()) {
733 " different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
736 self.cutoff_prev_at_overlapping_curr();
737 // If one span dominates the other, assocate the span with the code from the dominated
738 // block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
739 // that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
740 // be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
744 // x if x < 1 => { ... }
747 // The span for the first `x` is referenced by both the pattern block (every time it is
748 // evaluated) and the arm code (only when matched). The counter will be applied only to
749 // the dominated block. This allows coverage to track and highlight things like the
750 // assignment of `x` above, if the branch is matched, making `x` available to the arm
751 // code; and to track and highlight the question mark `?` "try" operator at the end of
752 // a function call returning a `Result`, so the `?` is covered when the function returns
753 // an `Err`, and not counted as covered if the function always returns `Ok`.
755 // Save `prev` in `pending_dups`. (`curr` will become `prev` in the next iteration.)
756 // If the `curr` CoverageSpan is later discarded, `pending_dups` can be discarded as
757 // well; but if `curr` is added to refined_spans, the `pending_dups` will also be added.
759 " different bcbs but SAME spans, and neither dominates, so keep curr for \
760 next iter, and, pending upcoming spans (unless overlapping) add prev={:?}",
763 let prev = self.take_prev();
764 self.pending_dups.push(prev);
768 /// `curr` overlaps `prev`. If `prev`s span extends left of `curr`s span, keep _only_
769 /// statements that end before `curr.lo()` (if any), and add the portion of the
770 /// combined span for those statements. Any other statements have overlapping spans
771 /// that can be ignored because `curr` and/or other upcoming statements/spans inside
772 /// the overlap area will produce their own counters. This disambiguation process
773 /// avoids injecting multiple counters for overlapping spans, and the potential for
775 fn cutoff_prev_at_overlapping_curr(&mut self) {
777 " different bcbs, overlapping spans, so ignore/drop pending and only add prev \
778 if it has statements that end before curr; prev={:?}",
781 if self.pending_dups.is_empty() {
// Trim `prev` back to statements that end before `curr` begins; emit it only
// if anything survives the cutoff.
782 let curr_span = self.curr().span;
783 self.prev_mut().cutoff_statements_at(curr_span.lo());
784 if self.prev().coverage_statements.is_empty() {
785 debug!(" ... no non-overlapping statements to add");
787 debug!(" ... adding modified prev={:?}", self.prev());
788 let prev = self.take_prev();
789 self.push_refined_span(prev);
792 // with `pending_dups`, `prev` cannot have any statements that don't overlap
793 self.pending_dups.clear();
// True if `covspan`'s BCB is dominated by `dom_covspan`'s BCB in the BCB CFG.
797 fn span_bcb_is_dominated_by(&self, covspan: &CoverageSpan, dom_covspan: &CoverageSpan) -> bool {
798 self.basic_coverage_blocks.is_dominated_by(covspan.bcb, dom_covspan.bcb)
802 /// If the MIR `Statement` has a span contributive to computing coverage spans,
803 /// return it; otherwise return `None`.
804 pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
805 match statement.kind {
806 // These statements have spans that are often outside the scope of the executed source code
807 // for their parent `BasicBlock`.
808 StatementKind::StorageLive(_)
809 | StatementKind::StorageDead(_)
810 // Coverage should not be encountered, but don't inject coverage coverage
811 | StatementKind::Coverage(_)
813 | StatementKind::Nop => None,
815 // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
816 // statements be more consistent?
818 // FakeReadCause::ForGuardBinding, in this example:
820 // x if x < 1 => { ... }
822 // The BasicBlock within the match arm code included one of these statements, but the span
823 // for it covered the `1` in this source. The actual statements have nothing to do with that
825 // FakeRead(ForGuardBinding, _4);
827 // _4 = &_1; (at the span for the first `x`)
828 // and `_1` is the `Place` for `somenum`.
830 // If and when the Issue is resolved, remove this special case match pattern:
831 StatementKind::FakeRead(box (cause, _)) if cause == FakeReadCause::ForGuardBinding => None,
833 // Retain spans from all other statements
834 StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
835 | StatementKind::CopyNonOverlapping(..)
836 | StatementKind::Assign(_)
837 | StatementKind::SetDiscriminant { .. }
838 | StatementKind::Retag(_, _)
839 | StatementKind::AscribeUserType(_, _) => {
840 Some(statement.source_info.span)
845 /// If the MIR `Terminator` has a span contributive to computing coverage spans,
846 /// return it; otherwise return `None`.
847 pub(super) fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
848 match terminator.kind {
849 // These terminators have spans that don't positively contribute to computing a reasonable
850 // span of actually executed source code. (For example, SwitchInt terminators extracted from
851 // an `if condition { block }` has a span that includes the executed block, if true,
852 // but for coverage, the code region executed, up to *and* through the SwitchInt,
853 // actually stops before the if's block.)
854 TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
855 | TerminatorKind::Assert { .. }
856 | TerminatorKind::Drop { .. }
857 | TerminatorKind::DropAndReplace { .. }
858 | TerminatorKind::SwitchInt { .. }
859 // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
860 | TerminatorKind::FalseEdge { .. }
861 | TerminatorKind::Goto { .. } => None,
863 // Call `func` operand can have a more specific span when part of a chain of calls
864 | TerminatorKind::Call { ref func, .. } => {
865 let mut span = terminator.source_info.span;
// When the callee constant's span starts later (e.g. `a.b().c()`), narrow the
// span's start to the callee so only the final call in a chain is covered here.
866 if let mir::Operand::Constant(box constant) = func {
867 if constant.span.lo() > span.lo() {
868 span = span.with_lo(constant.span.lo());
874 // Retain spans from all other terminators
875 TerminatorKind::Resume
876 | TerminatorKind::Abort
877 | TerminatorKind::Return
878 | TerminatorKind::Yield { .. }
879 | TerminatorKind::GeneratorDrop
880 | TerminatorKind::FalseUnwind { .. }
881 | TerminatorKind::InlineAsm { .. } => {
882 Some(terminator.source_info.span)
887 /// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
888 /// within the function's body source. This span is guaranteed to be contained
889 /// within, or equal to, the `body_span`. If the extrapolated span is not
890 /// contained within the `body_span`, the `body_span` is returned.
892 /// [^1]Expansions result from Rust syntax including macros, syntactic sugar,
895 pub(super) fn function_source_span(span: Span, body_span: Span) -> Span {
// Walk the span out of macro expansions (`original_sp`), re-tag it with the
// body's syntax context, and clamp to `body_span` if it escaped the body.
896 let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
897 if body_span.contains(original_span) { original_span } else { body_span }