//! S(c, (r_1, p_2, .., p_n))
//! S(c, (r_2, p_2, .., p_n))
//!
-//! 2. We can pop a wildcard off the top of the stack. This is called `D(p)`, where `p` is
-//! a pattern-stack.
+//! 2. We can pop a wildcard off the top of the stack. This is called `S(_, p)`, where `p` is
+//! a pattern-stack. Note: the paper calls this `D(p)`.
//! This is used when we know there are missing constructor cases, but there might be
//! existing wildcard patterns, so to check the usefulness of the matrix, we have to check
//! all its *other* components.
//! p_2, .., p_n
//! 2.3. `p_1 = r_1 | r_2`. We expand the OR-pattern and then recurse on each resulting
//! stack.
-//! D((r_1, p_2, .., p_n))
-//! D((r_2, p_2, .., p_n))
+//! S(_, (r_1, p_2, .., p_n))
+//! S(_, (r_2, p_2, .., p_n))
//!
//! Note that the OR-patterns are not always used directly in Rust, but are used to derive the
//! exhaustive integer matching rules, so they're written here for posterity.
//! That's almost correct, but only works if there were no wildcards in those first
//! components. So we need to check that `p` is useful with respect to the rows that
-//! start with a wildcard, if there are any. This is where `D` comes in:
-//! `U(P, p) := U(D(P), D(p))`
+//! start with a wildcard, if there are any. This is where `S(_, p)` comes in:
+//! `U(P, p) := U(S(_, P), S(_, p))`
//!
//! For example, if `P` is:
//!
//! disjunction over every range. This is a bit more tricky to deal with: essentially we need
//! to form equivalence classes of subranges of the constructor range for which the behaviour
//! of the matrix `P` and new pattern `p` are the same. This is described in more
-//! detail in `split_grouped_constructors`.
+//! detail in `Constructor::split`.
//! + If some constructors are missing from the matrix, it turns out we don't need to do
//! anything special (because we know none of the integers are actually wildcards: i.e., we
//! can't span wildcards using ranges).
PatStack(vec)
}
- fn from_slice(s: &[&'p Pat<'tcx>]) -> Self {
- PatStack(SmallVec::from_slice(s))
- }
-
fn is_empty(&self) -> bool {
self.0.is_empty()
}
self.0[0]
}
- fn to_tail(&self) -> Self {
- PatStack::from_slice(&self.0[1..])
- }
-
fn iter(&self) -> impl Iterator<Item = &Pat<'tcx>> {
self.0.iter().copied()
}
}
}
- /// This computes `D(self)`. See top of the file for explanations.
- fn specialize_wildcard(&self) -> Option<Self> {
- if self.head().is_wildcard() { Some(self.to_tail()) } else { None }
- }
-
/// This computes `S(constructor, self)`. See top of the file for explanations.
+ ///
+ /// This is the main specialization step. It expands the pattern
+ /// into `arity` patterns based on the constructor. For most patterns, the step is trivial,
+ /// for instance tuple patterns are flattened and box patterns expand into their inner pattern.
+ /// Returns `None` if the pattern does not have the given constructor.
+ ///
+ /// OTOH, slice patterns with a subslice pattern (tail @ ..) can be expanded into multiple
+ /// different patterns.
+ /// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
+ /// fields filled with wild patterns.
+ ///
+ /// This is roughly the inverse of `Constructor::apply`.
fn specialize_constructor(
&self,
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
- constructor: &Constructor<'tcx>,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ ctor: &Constructor<'tcx>,
ctor_wild_subpatterns: &Fields<'p, 'tcx>,
is_my_head_ctor: bool,
) -> Option<PatStack<'p, 'tcx>> {
- let new_fields = specialize_one_pattern(
- cx,
+ // We return `None` if `ctor` is not covered by `self.head()`. If `ctor` is known to be
+ // derived from `self.head()`, then we don't need to check; otherwise, we compute the
+ // constructor of `self.head()` and check for constructor inclusion.
+ // Note that this shortcut is also necessary for correctness: a pattern should always be
+ // specializable with its own constructor, even in cases where we refuse to inspect values like
+ // opaque constants.
+ if !is_my_head_ctor {
+ let head_ctor = pat_constructor(cx.tcx, cx.param_env, self.head());
+ if !ctor.is_covered_by(cx, &head_ctor, self.head().ty) {
+ return None;
+ }
+ }
+ let new_fields = ctor_wild_subpatterns.replace_with_pattern_arguments(self.head());
+
+ debug!(
+ "specialize_constructor({:#?}, {:#?}, {:#?}) = {:#?}",
self.head(),
- constructor,
+ ctor,
ctor_wild_subpatterns,
- is_my_head_ctor,
- )?;
+ new_fields
+ );
+
+ // We pop the head pattern and push the new fields extracted from the arguments of
+ // `self.head()`.
Some(new_fields.push_on_patstack(&self.0[1..]))
}
}
/// so it is possible to precompute the result of `Matrix::specialize_constructor` at a
/// lower computational complexity.
/// `lookup` is responsible for holding the precomputed result of
- /// `Matrix::specialize_constructor`, while `wilds` is used for two purposes: the first one is
- /// the precomputed result of `Matrix::specialize_wildcard`, and the second is to be used as a
+ /// specialization, while `wilds` is used for two purposes: the first one is
+ /// the precomputed result of specialization with a wildcard, and the second is to be used as a
/// fallback for `Matrix::specialize_constructor` when it tries to apply a constructor that
/// has not been seen in the `Matrix`. See `update_cache` for further explanations.
Variants { lookup: FxHashMap<DefId, SmallVec<[usize; 1]>>, wilds: SmallVec<[usize; 1]> },
v.push(idx);
}
// Per rule 2.1 and 2.2 in the top-level comments, only wildcard patterns
- // are included in the result of `specialize_wildcard`.
+ // are included in the result of specialization with a wildcard.
// What we do here is to track the wildcards we have seen; so in addition to
- // acting as the precomputed result of `specialize_wildcard`, `wilds` also
+ // acting as the precomputed result of specialization with a wildcard, `wilds` also
// serves as the default value of `specialize_constructor` for constructors
// that are not in `lookup`.
wilds.push(idx);
self.patterns.iter().map(|r| r.head())
}
- /// This computes `D(self)`. See top of the file for explanations.
- fn specialize_wildcard(&self) -> Self {
- match &self.cache {
- SpecializationCache::Variants { wilds, .. } => {
- let result =
- wilds.iter().filter_map(|&i| self.patterns[i].specialize_wildcard()).collect();
- // When debug assertions are enabled, check the results against the "slow path"
- // result.
- debug_assert_eq!(
- result,
- Self {
- patterns: self.patterns.clone(),
- cache: SpecializationCache::Incompatible
- }
- .specialize_wildcard()
- );
- result
- }
- SpecializationCache::Incompatible => {
- self.patterns.iter().filter_map(|r| r.specialize_wildcard()).collect()
- }
- }
- }
-
/// This computes `S(constructor, self)`. See top of the file for explanations.
fn specialize_constructor(
&self,
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
constructor: &Constructor<'tcx>,
ctor_wild_subpatterns: &Fields<'p, 'tcx>,
) -> Matrix<'p, 'tcx> {
match &self.cache {
SpecializationCache::Variants { lookup, wilds } => {
- let result: Self = if let Constructor::Variant(id) = constructor {
+ let cached = if let Constructor::Variant(id) = constructor {
lookup
.get(id)
// Default to `wilds` for absent keys. See `update_cache` for an explanation.
.unwrap_or(&wilds)
- .iter()
- .filter_map(|&i| {
- self.patterns[i].specialize_constructor(
- cx,
- constructor,
- ctor_wild_subpatterns,
- false,
- )
- })
- .collect()
+ } else if let Wildcard = constructor {
+ &wilds
} else {
- unreachable!()
+ bug!(
+ "unexpected constructor encountered while dealing with matrix cache: {:?}",
+ constructor
+ );
};
+ let result: Self = cached
+ .iter()
+ .filter_map(|&i| {
+ self.patterns[i].specialize_constructor(
+ cx,
+ constructor,
+ ctor_wild_subpatterns,
+ false,
+ )
+ })
+ .collect();
// When debug assertions are enabled, check the results against the "slow path"
// result.
debug_assert_eq!(
fn arity(self) -> u64 {
self.pattern_kind().arity()
}
+
+ /// The exhaustiveness-checking paper does not include any details on
+ /// checking variable-length slice patterns. However, they are matched
+ /// by an infinite collection of fixed-length array patterns.
+ ///
+ /// Checking the infinite set directly would take an infinite amount
+ /// of time. However, it turns out that for each finite set of
+ /// patterns `P`, all sufficiently large array lengths are equivalent:
+ ///
+ /// Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
+ /// to exactly the subset `Pₜ` of `P` can be transformed to a slice
+ /// `sₘ` for each sufficiently-large length `m` that applies to exactly
+ /// the same subset of `P`.
+ ///
+ /// Because of that, each witness for reachability-checking from one
+ /// of the sufficiently-large lengths can be transformed to an
+ /// equally-valid witness from any other length, so we only have
+ /// to check slice lengths from the "minimal sufficiently-large length"
+ /// and below.
+ ///
+ /// Note that the fact that there is a *single* `sₘ` for each `m`
+ /// not depending on the specific pattern in `P` is important: if
+ /// you look at the pair of patterns
+ /// `[true, ..]`
+ /// `[.., false]`
+ /// Then any slice of length ≥1 that matches one of these two
+ /// patterns can be trivially turned to a slice of any
+ /// other length ≥1 that matches them and vice-versa,
+ /// but the slice of length 2 `[false, true]` that matches neither
+ /// of these patterns can't be turned to a slice of length 1 that
+ /// matches neither of these patterns, so we have to consider
+ /// slices of length 2 there.
+ ///
+ /// Now, to see that that length exists and find it, observe that slice
+ /// patterns are either "fixed-length" patterns (`[_, _, _]`) or
+ /// "variable-length" patterns (`[_, .., _]`).
+ ///
+ /// For fixed-length patterns, all slices with lengths *longer* than
+ /// the pattern's length have the same outcome (of not matching), so
+ /// as long as `L` is greater than the pattern's length we can pick
+ /// any `sₘ` from that length and get the same result.
+ ///
+ /// For variable-length patterns, the situation is more complicated,
+ /// because as seen above the precise value of `sₘ` matters.
+ ///
+ /// However, for each variable-length pattern `p` with a prefix of length
+ /// `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
+ /// `slₚ` elements are examined.
+ ///
+ /// Therefore, as long as `L` is positive (to avoid concerns about empty
+ /// types), all elements after the maximum prefix length and before
+ /// the maximum suffix length are not examined by any variable-length
+ /// pattern, and therefore can be added/removed without affecting
+ /// them - creating equivalent patterns from any sufficiently-large
+ /// length.
+ ///
+ /// Of course, if fixed-length patterns exist, we must be sure
+ /// that our length is large enough to miss them all, so
+ /// we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
+ ///
+ /// for example, with the above pair of patterns, all elements
+ /// but the first and last can be added/removed, so any
+ /// witness of length ≥2 (say, `[false, false, true]`) can be
+ /// turned to a witness from any other length ≥2.
+ fn split<'p, 'tcx>(
+ self,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ matrix: &Matrix<'p, 'tcx>,
+ ) -> SmallVec<[Constructor<'tcx>; 1]> {
+ let (array_len, self_prefix, self_suffix) = match self {
+ Slice { array_len, kind: VarLen(self_prefix, self_suffix) } => {
+ (array_len, self_prefix, self_suffix)
+ }
+ _ => return smallvec![Slice(self)],
+ };
+
+ let head_ctors = matrix
+ .heads()
+ .map(|p| pat_constructor(cx.tcx, cx.param_env, p))
+ .filter(|c| !c.is_wildcard());
+
+ let mut max_prefix_len = self_prefix;
+ let mut max_suffix_len = self_suffix;
+ let mut max_fixed_len = 0;
+
+ for ctor in head_ctors {
+ if let Slice(slice) = ctor {
+ match slice.pattern_kind() {
+ FixedLen(len) => {
+ max_fixed_len = cmp::max(max_fixed_len, len);
+ }
+ VarLen(prefix, suffix) => {
+ max_prefix_len = cmp::max(max_prefix_len, prefix);
+ max_suffix_len = cmp::max(max_suffix_len, suffix);
+ }
+ }
+ }
+ }
+
+ // For diagnostics, we keep the prefix and suffix lengths separate, so in the case
+ // where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
+ // so that `L = max_prefix_len + max_suffix_len`.
+ if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
+ // The subtraction can't overflow thanks to the above check.
+ // Since the condition uses `>=`, the new `max_prefix_len` is also guaranteed
+ // to be at least as large as its previous value.
+ max_prefix_len = max_fixed_len + 1 - max_suffix_len;
+ }
+
+ match array_len {
+ Some(len) => {
+ let kind = if max_prefix_len + max_suffix_len < len {
+ VarLen(max_prefix_len, max_suffix_len)
+ } else {
+ FixedLen(len)
+ };
+ smallvec![Slice(Slice { array_len, kind })]
+ }
+ None => {
+ // `ctor` originally covered the range `(self_prefix +
+ // self_suffix..infinity)`. We now split it into two: lengths smaller than
+ // `max_prefix_len + max_suffix_len` are treated independently as
+ // fixed-length slices, and lengths above are captured by a final VarLen
+ // constructor.
+ let smaller_lengths =
+ (self_prefix + self_suffix..max_prefix_len + max_suffix_len).map(FixedLen);
+ let final_slice = VarLen(max_prefix_len, max_suffix_len);
+ smaller_lengths
+ .chain(Some(final_slice))
+ .map(|kind| Slice { array_len, kind })
+ .map(Slice)
+ .collect()
+ }
+ }
+ }
}
/// A value can be decomposed into a constructor applied to some fields. This struct represents
/// the constructor. See also `Fields`.
///
/// `pat_constructor` retrieves the constructor corresponding to a pattern.
-/// `specialize_one_pattern` returns the list of fields corresponding to a pattern, given a
+/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a
/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and
/// `Fields`.
#[derive(Clone, Debug, PartialEq)]
Opaque,
/// Fake extra constructor for enums that aren't allowed to be matched exhaustively.
NonExhaustive,
+ /// Wildcard pattern.
+ Wildcard,
}
impl<'tcx> Constructor<'tcx> {
+ fn is_wildcard(&self) -> bool {
+ match self {
+ Wildcard => true,
+ _ => false,
+ }
+ }
+
fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx {
match *self {
Variant(id) => adt.variant_index_with_id(id),
}
// This constructor is never covered by anything else
NonExhaustive => vec![NonExhaustive],
- Opaque => bug!("unexpected opaque ctor {:?} found in all_ctors", self),
+ Opaque => bug!("found unexpected opaque ctor in all_ctors"),
+ Wildcard => bug!("found unexpected wildcard ctor in all_ctors"),
+ }
+ }
+
+ /// Some constructors (namely IntRange and Slice) actually stand for a set of actual
+ /// constructors (integers and fixed-size slices). When specializing for these
+ /// constructors, we want to be specializing for the actual underlying constructors.
+ /// Naively, we would simply return the list of constructors they correspond to. We instead are
+ /// more clever: if there are constructors that we know will behave the same wrt the current
+ /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
+ /// will either be all useful or all non-useful with a given matrix.
+ ///
+ /// See the branches for details on how the splitting is done.
+ ///
+ /// This function may discard some irrelevant constructors if this preserves behavior and
+ /// diagnostics. Eg. for the `_` case, we ignore the constructors already present in the
+ /// matrix, unless all of them are.
+ ///
+ /// `hir_id` is `None` when we're evaluating the wildcard pattern. In that case we do not want
+ /// to lint for overlapping ranges.
+ fn split<'p>(
+ self,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ pcx: PatCtxt<'tcx>,
+ matrix: &Matrix<'p, 'tcx>,
+ hir_id: Option<HirId>,
+ ) -> SmallVec<[Self; 1]> {
+ debug!("Constructor::split({:#?}, {:#?})", self, matrix);
+
+ match self {
+ // Fast-track if the range is trivial. In particular, we don't do the overlapping
+ // ranges check.
+ IntRange(ctor_range)
+ if ctor_range.treat_exhaustively(cx.tcx) && !ctor_range.is_singleton() =>
+ {
+ ctor_range.split(cx, pcx, matrix, hir_id)
+ }
+ Slice(slice @ Slice { kind: VarLen(..), .. }) => slice.split(cx, matrix),
+ // Any other constructor can be used unchanged.
+ _ => smallvec![self],
+ }
+ }
+
+ /// Returns whether `self` is covered by `other`, ie whether `self` is a subset of `other`. For
+ /// the simple cases, this is simply checking for equality. For the "grouped" constructors,
+ /// this checks for inclusion.
+ fn is_covered_by<'p>(
+ &self,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ other: &Constructor<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ match (self, other) {
+ // Wildcards cover anything
+ (_, Wildcard) => true,
+ // Wildcards are only covered by wildcards
+ (Wildcard, _) => false,
+
+ (Single, Single) => true,
+ (Variant(self_id), Variant(other_id)) => self_id == other_id,
+
+ (IntRange(self_range), IntRange(other_range)) => {
+ if self_range.intersection(cx.tcx, other_range).is_some() {
+ // Constructor splitting should ensure that all intersections we encounter
+ // are actually inclusions.
+ assert!(self_range.is_subrange(other_range));
+ true
+ } else {
+ false
+ }
+ }
+ (
+ FloatRange(self_from, self_to, self_end),
+ FloatRange(other_from, other_to, other_end),
+ ) => {
+ match (
+ compare_const_vals(cx.tcx, self_to, other_to, cx.param_env, ty),
+ compare_const_vals(cx.tcx, self_from, other_from, cx.param_env, ty),
+ ) {
+ (Some(to), Some(from)) => {
+ (from == Ordering::Greater || from == Ordering::Equal)
+ && (to == Ordering::Less
+ || (other_end == self_end && to == Ordering::Equal))
+ }
+ _ => false,
+ }
+ }
+ (Str(self_val), Str(other_val)) => {
+ // FIXME: there's probably a more direct way of comparing for equality
+ match compare_const_vals(cx.tcx, self_val, other_val, cx.param_env, ty) {
+ Some(comparison) => comparison == Ordering::Equal,
+ None => false,
+ }
+ }
+
+ (Slice(self_slice), Slice(other_slice)) => {
+ other_slice.pattern_kind().covers_length(self_slice.arity())
+ }
+
+ // We are trying to inspect an opaque constant. Thus we skip the row.
+ (Opaque, _) | (_, Opaque) => false,
+ // Only a wildcard pattern can match the special extra constructor.
+ (NonExhaustive, _) => false,
+
+ _ => bug!("trying to compare incompatible constructors {:?} and {:?}", self, other),
}
}
/// Apply a constructor to a list of patterns, yielding a new pattern. `pats`
/// must have as many elements as this constructor's arity.
///
- /// This is roughly the inverse of `specialize_one_pattern`.
+ /// This is roughly the inverse of `specialize_constructor`.
///
/// Examples:
/// `self`: `Constructor::Single`
&FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
IntRange(range) => return range.to_pat(cx.tcx),
NonExhaustive => PatKind::Wild,
- Opaque => bug!("we should not try to apply an opaque constructor {:?}", self),
+ Opaque => bug!("we should not try to apply an opaque constructor"),
+ Wildcard => bug!("we should not try to apply a wildcard constructor"),
};
Pat { ty, span: DUMMY_SP, kind: Box::new(pat) }
}
_ => bug!("bad slice pattern {:?} {:?}", constructor, ty),
},
- Str(..) | FloatRange(..) | IntRange(..) | NonExhaustive | Opaque => Fields::empty(),
+ Str(..) | FloatRange(..) | IntRange(..) | NonExhaustive | Opaque | Wildcard => {
+ Fields::empty()
+ }
};
debug!("Fields::wildcards({:?}, {:?}) = {:#?}", constructor, ty, ret);
ret
}
}
+ /// Replaces contained fields with the arguments of the given pattern. Only use on a pattern
+ /// that is compatible with the constructor used to build `self`.
+ /// This is meant to be used on the result of `Fields::wildcards()`. The idea is that
+ /// `wildcards` constructs a list of fields where all entries are wildcards, and the pattern
+ /// provided to this function fills some of the fields with non-wildcards.
+ /// In the following example `Fields::wildcards` would return `[_, _, _, _]`. If we call
+ /// `replace_with_pattern_arguments` on it with the pattern, the result will be `[Some(0), _,
+ /// _, _]`.
+ /// ```rust
+ /// let x: [Option<u8>; 4] = foo();
+ /// match x {
+ /// [Some(0), ..] => {}
+ /// }
+ /// ```
+ fn replace_with_pattern_arguments(&self, pat: &'p Pat<'tcx>) -> Self {
+ match pat.kind.as_ref() {
+ PatKind::Deref { subpattern } => Self::from_single_pattern(subpattern),
+ PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
+ self.replace_with_fieldpats(subpatterns)
+ }
+ PatKind::Array { prefix, suffix, .. } | PatKind::Slice { prefix, suffix, .. } => {
+ // Number of subpatterns for the constructor
+ let ctor_arity = self.len();
+
+ // Replace the prefix and the suffix with the given patterns, leaving wildcards in
+ // the middle if there was a subslice pattern `..`.
+ let prefix = prefix.iter().enumerate();
+ let suffix =
+ suffix.iter().enumerate().map(|(i, p)| (ctor_arity - suffix.len() + i, p));
+ self.replace_fields_indexed(prefix.chain(suffix))
+ }
+ _ => self.clone(),
+ }
+ }
+
fn push_on_patstack(self, stack: &[&'p Pat<'tcx>]) -> PatStack<'p, 'tcx> {
let pats: SmallVec<_> = match self {
Fields::Slice(pats) => pats.iter().chain(stack.iter().copied()).collect(),
/// Invariant: this returns an empty `Vec` if and only if the type is uninhabited (as determined by
/// `cx.is_uninhabited()`).
fn all_constructors<'a, 'tcx>(
- cx: &mut MatchCheckCtxt<'a, 'tcx>,
+ cx: &MatchCheckCtxt<'a, 'tcx>,
pcx: PatCtxt<'tcx>,
) -> Vec<Constructor<'tcx>> {
debug!("all_constructors({:?})", pcx.ty);
) -> Option<IntRange<'tcx>> {
// This MUST be kept in sync with `pat_constructor`.
match *pat.kind {
- PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
- PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
-
- PatKind::Binding { .. }
- | PatKind::Wild
- | PatKind::Leaf { .. }
- | PatKind::Deref { .. }
- | PatKind::Variant { .. }
- | PatKind::Array { .. }
- | PatKind::Slice { .. } => None,
-
PatKind::Constant { value } => Self::from_const(tcx, param_env, value, pat.span),
-
PatKind::Range(PatRange { lo, hi, end }) => {
let ty = lo.ty;
Self::from_range(
pat.span,
)
}
+ _ => None,
}
}
// This is a brand new pattern, so we don't reuse `self.span`.
Pat { ty: self.ty, span: DUMMY_SP, kind: Box::new(kind) }
}
+
+ /// For exhaustive integer matching, some constructors are grouped within other constructors
+ /// (namely integer typed values are grouped within ranges). However, when specialising these
+ /// constructors, we want to be specialising for the underlying constructors (the integers), not
+ /// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
+ /// mean creating a separate constructor for every single value in the range, which is clearly
+ /// impractical. However, observe that for some ranges of integers, the specialisation will be
+ /// identical across all values in that range (i.e., there are equivalence classes of ranges of
+ /// constructors based on their `U(S(c, P), S(c, p))` outcome). These classes are grouped by
+ /// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
+ /// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
+ /// change.
+ /// Our solution, therefore, is to split the range constructor into subranges at every single point
+ /// the group of intersecting patterns changes (using the method described below).
+ /// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
+ /// on actual integers. The nice thing about this is that the number of subranges is linear in the
+ /// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
+ /// need to be worried about matching over gargantuan ranges.
+ ///
+ /// Essentially, given the first column of a matrix representing ranges, looking like the following:
+ ///
+ /// |------| |----------| |-------| ||
+ /// |-------| |-------| |----| ||
+ /// |---------|
+ ///
+ /// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
+ ///
+ /// |--|--|||-||||--||---|||-------| |-|||| ||
+ ///
+ /// The logic for determining how to split the ranges is fairly straightforward: we calculate
+ /// boundaries for each interval range, sort them, then create constructors for each new interval
+ /// between every pair of boundary points. (This essentially sums up to performing the intuitive
+ /// merging operation depicted above.)
+ fn split<'p>(
+ self,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ pcx: PatCtxt<'tcx>,
+ matrix: &Matrix<'p, 'tcx>,
+ hir_id: Option<HirId>,
+ ) -> SmallVec<[Constructor<'tcx>; 1]> {
+ let ty = pcx.ty;
+
+ /// Represents a border between 2 integers. Because the intervals spanning borders
+ /// must be able to cover every integer, we need to be able to represent
+ /// 2^128 + 1 such borders.
+ #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+ enum Border {
+ JustBefore(u128),
+ AfterMax,
+ }
+
+ // A function for extracting the borders of an integer interval.
+ fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
+ let (lo, hi) = r.range.into_inner();
+ let from = Border::JustBefore(lo);
+ let to = match hi.checked_add(1) {
+ Some(m) => Border::JustBefore(m),
+ None => Border::AfterMax,
+ };
+ vec![from, to].into_iter()
+ }
+
+ // Collect the span and range of all the intersecting ranges to lint on likely
+ // incorrect range patterns. (#63987)
+ let mut overlaps = vec![];
+ // `borders` is the set of borders between equivalence classes: each equivalence
+ // class lies between 2 borders.
+ let row_borders = matrix
+ .patterns
+ .iter()
+ .flat_map(|row| {
+ IntRange::from_pat(cx.tcx, cx.param_env, row.head()).map(|r| (r, row.len()))
+ })
+ .flat_map(|(range, row_len)| {
+ let intersection = self.intersection(cx.tcx, &range);
+ let should_lint = self.suspicious_intersection(&range);
+ if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
+ // FIXME: for now, only check for overlapping ranges on simple range
+ // patterns. Otherwise with the current logic the following is detected
+ // as overlapping:
+ // match (10u8, true) {
+ // (0 ..= 125, false) => {}
+ // (126 ..= 255, false) => {}
+ // (0 ..= 255, true) => {}
+ // }
+ overlaps.push(range.clone());
+ }
+ intersection
+ })
+ .flat_map(range_borders);
+ let self_borders = range_borders(self.clone());
+ let mut borders: Vec<_> = row_borders.chain(self_borders).collect();
+ borders.sort_unstable();
+
+ self.lint_overlapping_patterns(cx.tcx, hir_id, ty, overlaps);
+
+ // We're going to iterate through every adjacent pair of borders, making sure that
+ // each represents an interval of nonnegative length, and convert each such
+ // interval into a constructor.
+ borders
+ .array_windows()
+ .filter_map(|&pair| match pair {
+ [Border::JustBefore(n), Border::JustBefore(m)] => {
+ if n < m {
+ Some(n..=(m - 1))
+ } else {
+ None
+ }
+ }
+ [Border::JustBefore(n), Border::AfterMax] => Some(n..=u128::MAX),
+ [Border::AfterMax, _] => None,
+ })
+ .map(|range| IntRange { range, ty, span: pcx.span })
+ .map(IntRange)
+ .collect()
+ }
+
+ fn lint_overlapping_patterns(
+ self,
+ tcx: TyCtxt<'tcx>,
+ hir_id: Option<HirId>,
+ ty: Ty<'tcx>,
+ overlaps: Vec<IntRange<'tcx>>,
+ ) {
+ if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
+ tcx.struct_span_lint_hir(
+ lint::builtin::OVERLAPPING_PATTERNS,
+ hir_id,
+ self.span,
+ |lint| {
+ let mut err = lint.build("multiple patterns covering the same range");
+ err.span_label(self.span, "overlapping patterns");
+ for int_range in overlaps {
+ // Use the real type for user display of the ranges:
+ err.span_label(
+ int_range.span,
+ &format!(
+ "this range overlaps on `{}`",
+ IntRange { range: int_range.range, ty, span: DUMMY_SP }.to_pat(tcx),
+ ),
+ );
+ }
+ err.emit();
+ },
+ );
+ }
+ }
}
/// Ignore spans when comparing, they don't carry semantic information as they are only for lints.
/// has one it must not be inserted into the matrix. This shouldn't be
/// relied on for soundness.
crate fn is_useful<'p, 'tcx>(
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
matrix: &Matrix<'p, 'tcx>,
v: &PatStack<'p, 'tcx>,
witness_preference: WitnessPreference,
debug!("is_useful_expand_first_col: pcx={:#?}, expanding {:#?}", pcx, v.head());
- let ret = if let Some(constructor) = pat_constructor(cx.tcx, cx.param_env, v.head()) {
+ let constructor = pat_constructor(cx.tcx, cx.param_env, v.head());
+ let ret = if !constructor.is_wildcard() {
debug!("is_useful - expanding constructor: {:#?}", constructor);
- split_grouped_constructors(
- cx.tcx,
- cx.param_env,
- pcx,
- vec![constructor],
- matrix,
- pcx.span,
- Some(hir_id),
- )
- .into_iter()
- .map(|c| {
- is_useful_specialized(
- cx,
- matrix,
- v,
- c,
- pcx.ty,
- witness_preference,
- hir_id,
- is_under_guard,
- )
- })
- .find(|result| result.is_useful())
- .unwrap_or(NotUseful)
+ constructor
+ .split(cx, pcx, matrix, Some(hir_id))
+ .into_iter()
+ .map(|c| {
+ is_useful_specialized(
+ cx,
+ matrix,
+ v,
+ c,
+ pcx.ty,
+ witness_preference,
+ hir_id,
+ is_under_guard,
+ )
+ })
+ .find(|result| result.is_useful())
+ .unwrap_or(NotUseful)
} else {
debug!("is_useful - expanding wildcard");
- let used_ctors: Vec<Constructor<'_>> =
- matrix.heads().filter_map(|p| pat_constructor(cx.tcx, cx.param_env, p)).collect();
+ let used_ctors: Vec<Constructor<'_>> = matrix
+ .heads()
+ .map(|p| pat_constructor(cx.tcx, cx.param_env, p))
+ .filter(|c| !c.is_wildcard())
+ .collect();
debug!("is_useful_used_ctors = {:#?}", used_ctors);
// `all_ctors` are all the constructors for the given type, which
// should all be represented (or caught with the wild pattern `_`).
if missing_ctors.is_empty() {
let (all_ctors, _) = missing_ctors.into_inner();
- split_grouped_constructors(cx.tcx, cx.param_env, pcx, all_ctors, matrix, DUMMY_SP, None)
+ all_ctors
.into_iter()
+ .flat_map(|ctor| ctor.split(cx, pcx, matrix, None))
.map(|c| {
is_useful_specialized(
cx,
.find(|result| result.is_useful())
.unwrap_or(NotUseful)
} else {
- let matrix = matrix.specialize_wildcard();
- let v = v.to_tail();
+ let ctor_wild_subpatterns = Fields::empty();
+ let matrix = matrix.specialize_constructor(cx, &constructor, &ctor_wild_subpatterns);
+ // Unwrap is ok: v can always be specialized with its own constructor.
+ let v =
+ v.specialize_constructor(cx, &constructor, &ctor_wild_subpatterns, true).unwrap();
let usefulness =
is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
/// A shorthand for the `U(S(c, P), S(c, q))` operation from the paper. I.e., `is_useful` applied
/// to the specialised version of both the pattern matrix `P` and the new pattern `q`.
fn is_useful_specialized<'p, 'tcx>(
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ cx: &MatchCheckCtxt<'p, 'tcx>,
matrix: &Matrix<'p, 'tcx>,
v: &PatStack<'p, 'tcx>,
ctor: Constructor<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
pat: &Pat<'tcx>,
-) -> Option<Constructor<'tcx>> {
+) -> Constructor<'tcx> {
// This MUST be kept in sync with `IntRange::from_pat`.
match *pat.kind {
PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
- PatKind::Binding { .. } | PatKind::Wild => None,
- PatKind::Leaf { .. } | PatKind::Deref { .. } => Some(Single),
+ PatKind::Binding { .. } | PatKind::Wild => Wildcard,
+ PatKind::Leaf { .. } | PatKind::Deref { .. } => Single,
PatKind::Variant { adt_def, variant_index, .. } => {
- Some(Variant(adt_def.variants[variant_index].def_id))
+ Variant(adt_def.variants[variant_index].def_id)
}
PatKind::Constant { value } => {
if let Some(int_range) = IntRange::from_const(tcx, param_env, value, pat.span) {
- Some(IntRange(int_range))
+ IntRange(int_range)
} else {
match value.ty.kind() {
- ty::Float(_) => Some(FloatRange(value, value, RangeEnd::Included)),
- ty::Ref(_, t, _) if t.is_str() => Some(Str(value)),
+ ty::Float(_) => FloatRange(value, value, RangeEnd::Included),
+ ty::Ref(_, t, _) if t.is_str() => Str(value),
// All constants that can be structurally matched have already been expanded
// into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
// opaque.
- _ => Some(Opaque),
+ _ => Opaque,
}
}
}
&end,
pat.span,
) {
- Some(IntRange(int_range))
+ IntRange(int_range)
} else {
- Some(FloatRange(lo, hi, end))
+ FloatRange(lo, hi, end)
}
}
PatKind::Array { ref prefix, ref slice, ref suffix }
let suffix = suffix.len() as u64;
let kind =
if slice.is_some() { VarLen(prefix, suffix) } else { FixedLen(prefix + suffix) };
- Some(Slice(Slice { array_len, kind }))
+ Slice(Slice { array_len, kind })
}
PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
}
}
-
-/// For exhaustive integer matching, some constructors are grouped within other constructors
-/// (namely integer typed values are grouped within ranges). However, when specialising these
-/// constructors, we want to be specialising for the underlying constructors (the integers), not
-/// the groups (the ranges). Thus we need to split the groups up. Splitting them up naïvely would
-/// mean creating a separate constructor for every single value in the range, which is clearly
-/// impractical. However, observe that for some ranges of integers, the specialisation will be
-/// identical across all values in that range (i.e., there are equivalence classes of ranges of
-/// constructors based on their `is_useful_specialized` outcome). These classes are grouped by
-/// the patterns that apply to them (in the matrix `P`). We can split the range whenever the
-/// patterns that apply to that range (specifically: the patterns that *intersect* with that range)
-/// change.
-/// Our solution, therefore, is to split the range constructor into subranges at every single point
-/// the group of intersecting patterns changes (using the method described below).
-/// And voilà! We're testing precisely those ranges that we need to, without any exhaustive matching
-/// on actual integers. The nice thing about this is that the number of subranges is linear in the
-/// number of rows in the matrix (i.e., the number of cases in the `match` statement), so we don't
-/// need to be worried about matching over gargantuan ranges.
-///
-/// Essentially, given the first column of a matrix representing ranges, looking like the following:
-///
-/// |------| |----------| |-------| ||
-/// |-------| |-------| |----| ||
-/// |---------|
-///
-/// We split the ranges up into equivalence classes so the ranges are no longer overlapping:
-///
-/// |--|--|||-||||--||---|||-------| |-|||| ||
-///
-/// The logic for determining how to split the ranges is fairly straightforward: we calculate
-/// boundaries for each interval range, sort them, then create constructors for each new interval
-/// between every pair of boundary points. (This essentially sums up to performing the intuitive
-/// merging operation depicted above.)
-///
-/// `hir_id` is `None` when we're evaluating the wildcard pattern, do not lint for overlapping in
-/// ranges that case.
-///
-/// This also splits variable-length slices into fixed-length slices.
-fn split_grouped_constructors<'p, 'tcx>(
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- pcx: PatCtxt<'tcx>,
- ctors: Vec<Constructor<'tcx>>,
- matrix: &Matrix<'p, 'tcx>,
- span: Span,
- hir_id: Option<HirId>,
-) -> Vec<Constructor<'tcx>> {
- let ty = pcx.ty;
- let mut split_ctors = Vec::with_capacity(ctors.len());
- debug!("split_grouped_constructors({:#?}, {:#?})", matrix, ctors);
-
- for ctor in ctors.into_iter() {
- match ctor {
- IntRange(ctor_range) if ctor_range.treat_exhaustively(tcx) => {
- // Fast-track if the range is trivial. In particular, don't do the overlapping
- // ranges check.
- if ctor_range.is_singleton() {
- split_ctors.push(IntRange(ctor_range));
- continue;
- }
-
- /// Represents a border between 2 integers. Because the intervals spanning borders
- /// must be able to cover every integer, we need to be able to represent
- /// 2^128 + 1 such borders.
- #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
- enum Border {
- JustBefore(u128),
- AfterMax,
- }
-
- // A function for extracting the borders of an integer interval.
- fn range_borders(r: IntRange<'_>) -> impl Iterator<Item = Border> {
- let (lo, hi) = r.range.into_inner();
- let from = Border::JustBefore(lo);
- let to = match hi.checked_add(1) {
- Some(m) => Border::JustBefore(m),
- None => Border::AfterMax,
- };
- vec![from, to].into_iter()
- }
-
- // Collect the span and range of all the intersecting ranges to lint on likely
- // incorrect range patterns. (#63987)
- let mut overlaps = vec![];
- // `borders` is the set of borders between equivalence classes: each equivalence
- // class lies between 2 borders.
- let row_borders = matrix
- .patterns
- .iter()
- .flat_map(|row| {
- IntRange::from_pat(tcx, param_env, row.head()).map(|r| (r, row.len()))
- })
- .flat_map(|(range, row_len)| {
- let intersection = ctor_range.intersection(tcx, &range);
- let should_lint = ctor_range.suspicious_intersection(&range);
- if let (Some(range), 1, true) = (&intersection, row_len, should_lint) {
- // FIXME: for now, only check for overlapping ranges on simple range
- // patterns. Otherwise with the current logic the following is detected
- // as overlapping:
- // match (10u8, true) {
- // (0 ..= 125, false) => {}
- // (126 ..= 255, false) => {}
- // (0 ..= 255, true) => {}
- // }
- overlaps.push(range.clone());
- }
- intersection
- })
- .flat_map(range_borders);
- let ctor_borders = range_borders(ctor_range.clone());
- let mut borders: Vec<_> = row_borders.chain(ctor_borders).collect();
- borders.sort_unstable();
-
- lint_overlapping_patterns(tcx, hir_id, ctor_range, ty, overlaps);
-
- // We're going to iterate through every adjacent pair of borders, making sure that
- // each represents an interval of nonnegative length, and convert each such
- // interval into a constructor.
- split_ctors.extend(
- borders
- .array_windows()
- .filter_map(|&pair| match pair {
- [Border::JustBefore(n), Border::JustBefore(m)] => {
- if n < m {
- Some(IntRange { range: n..=(m - 1), ty, span })
- } else {
- None
- }
- }
- [Border::JustBefore(n), Border::AfterMax] => {
- Some(IntRange { range: n..=u128::MAX, ty, span })
- }
- [Border::AfterMax, _] => None,
- })
- .map(IntRange),
- );
- }
- Slice(Slice { array_len, kind: VarLen(self_prefix, self_suffix) }) => {
- // The exhaustiveness-checking paper does not include any details on
- // checking variable-length slice patterns. However, they are matched
- // by an infinite collection of fixed-length array patterns.
- //
- // Checking the infinite set directly would take an infinite amount
- // of time. However, it turns out that for each finite set of
- // patterns `P`, all sufficiently large array lengths are equivalent:
- //
- // Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies
- // to exactly the subset `Pₜ` of `P` can be transformed to a slice
- // `sₘ` for each sufficiently-large length `m` that applies to exactly
- // the same subset of `P`.
- //
- // Because of that, each witness for reachability-checking from one
- // of the sufficiently-large lengths can be transformed to an
- // equally-valid witness from any other length, so we only have
- // to check slice lengths from the "minimal sufficiently-large length"
- // and below.
- //
- // Note that the fact that there is a *single* `sₘ` for each `m`
- // not depending on the specific pattern in `P` is important: if
- // you look at the pair of patterns
- // `[true, ..]`
- // `[.., false]`
- // Then any slice of length ≥1 that matches one of these two
- // patterns can be trivially turned to a slice of any
- // other length ≥1 that matches them and vice-versa - for
- // but the slice from length 2 `[false, true]` that matches neither
- // of these patterns can't be turned to a slice from length 1 that
- // matches neither of these patterns, so we have to consider
- // slices from length 2 there.
- //
- // Now, to see that that length exists and find it, observe that slice
- // patterns are either "fixed-length" patterns (`[_, _, _]`) or
- // "variable-length" patterns (`[_, .., _]`).
- //
- // For fixed-length patterns, all slices with lengths *longer* than
- // the pattern's length have the same outcome (of not matching), so
- // as long as `L` is greater than the pattern's length we can pick
- // any `sₘ` from that length and get the same result.
- //
- // For variable-length patterns, the situation is more complicated,
- // because as seen above the precise value of `sₘ` matters.
- //
- // However, for each variable-length pattern `p` with a prefix of length
- // `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last
- // `slₚ` elements are examined.
- //
- // Therefore, as long as `L` is positive (to avoid concerns about empty
- // types), all elements after the maximum prefix length and before
- // the maximum suffix length are not examined by any variable-length
- // pattern, and therefore can be added/removed without affecting
- // them - creating equivalent patterns from any sufficiently-large
- // length.
- //
- // Of course, if fixed-length patterns exist, we must be sure
- // that our length is large enough to miss them all, so
- // we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
- //
- // for example, with the above pair of patterns, all elements
- // but the first and last can be added/removed, so any
- // witness of length ≥2 (say, `[false, false, true]`) can be
- // turned to a witness from any other length ≥2.
-
- let mut max_prefix_len = self_prefix;
- let mut max_suffix_len = self_suffix;
- let mut max_fixed_len = 0;
-
- let head_ctors =
- matrix.heads().filter_map(|pat| pat_constructor(tcx, param_env, pat));
- for ctor in head_ctors {
- if let Slice(slice) = ctor {
- match slice.pattern_kind() {
- FixedLen(len) => {
- max_fixed_len = cmp::max(max_fixed_len, len);
- }
- VarLen(prefix, suffix) => {
- max_prefix_len = cmp::max(max_prefix_len, prefix);
- max_suffix_len = cmp::max(max_suffix_len, suffix);
- }
- }
- }
- }
-
- // For diagnostics, we keep the prefix and suffix lengths separate, so in the case
- // where `max_fixed_len + 1` is the largest, we adapt `max_prefix_len` accordingly,
- // so that `L = max_prefix_len + max_suffix_len`.
- if max_fixed_len + 1 >= max_prefix_len + max_suffix_len {
- // The subtraction can't overflow thanks to the above check.
- // The new `max_prefix_len` is also guaranteed to be larger than its previous
- // value.
- max_prefix_len = max_fixed_len + 1 - max_suffix_len;
- }
-
- match array_len {
- Some(len) => {
- let kind = if max_prefix_len + max_suffix_len < len {
- VarLen(max_prefix_len, max_suffix_len)
- } else {
- FixedLen(len)
- };
- split_ctors.push(Slice(Slice { array_len, kind }));
- }
- None => {
- // `ctor` originally covered the range `(self_prefix +
- // self_suffix..infinity)`. We now split it into two: lengths smaller than
- // `max_prefix_len + max_suffix_len` are treated independently as
- // fixed-lengths slices, and lengths above are captured by a final VarLen
- // constructor.
- split_ctors.extend(
- (self_prefix + self_suffix..max_prefix_len + max_suffix_len)
- .map(|len| Slice(Slice { array_len, kind: FixedLen(len) })),
- );
- split_ctors.push(Slice(Slice {
- array_len,
- kind: VarLen(max_prefix_len, max_suffix_len),
- }));
- }
- }
- }
- // Any other constructor can be used unchanged.
- _ => split_ctors.push(ctor),
- }
- }
-
- debug!("split_grouped_constructors(..)={:#?}", split_ctors);
- split_ctors
-}
-
-fn lint_overlapping_patterns<'tcx>(
- tcx: TyCtxt<'tcx>,
- hir_id: Option<HirId>,
- ctor_range: IntRange<'tcx>,
- ty: Ty<'tcx>,
- overlaps: Vec<IntRange<'tcx>>,
-) {
- if let (true, Some(hir_id)) = (!overlaps.is_empty(), hir_id) {
- tcx.struct_span_lint_hir(
- lint::builtin::OVERLAPPING_PATTERNS,
- hir_id,
- ctor_range.span,
- |lint| {
- let mut err = lint.build("multiple patterns covering the same range");
- err.span_label(ctor_range.span, "overlapping patterns");
- for int_range in overlaps {
- // Use the real type for user display of the ranges:
- err.span_label(
- int_range.span,
- &format!(
- "this range overlaps on `{}`",
- IntRange { range: int_range.range, ty, span: DUMMY_SP }.to_pat(tcx),
- ),
- );
- }
- err.emit();
- },
- );
- }
-}
-
-/// This is the main specialization step. It expands the pattern
-/// into `arity` patterns based on the constructor. For most patterns, the step is trivial,
-/// for instance tuple patterns are flattened and box patterns expand into their inner pattern.
-/// Returns `None` if the pattern does not have the given constructor.
-///
-/// OTOH, slice patterns with a subslice pattern (tail @ ..) can be expanded into multiple
-/// different patterns.
-/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
-/// fields filled with wild patterns.
-///
-/// This is roughly the inverse of `Constructor::apply`.
-fn specialize_one_pattern<'p, 'tcx>(
- cx: &mut MatchCheckCtxt<'p, 'tcx>,
- pat: &'p Pat<'tcx>,
- constructor: &Constructor<'tcx>,
- ctor_wild_subpatterns: &Fields<'p, 'tcx>,
- is_its_own_ctor: bool, // Whether `ctor` is known to be derived from `pat`
-) -> Option<Fields<'p, 'tcx>> {
- if let NonExhaustive = constructor {
- // Only a wildcard pattern can match the special extra constructor.
- if !pat.is_wildcard() {
- return None;
- }
- return Some(Fields::empty());
- }
-
- if let Opaque = constructor {
- // Only a wildcard pattern can match an opaque constant, unless we're specializing the
- // value against its own constructor. That happens when we call
- // `v.specialize_constructor(ctor)` with `ctor` obtained from `pat_constructor(v.head())`.
- // For example, in the following match, when we are dealing with the third branch, we will
- // specialize with an `Opaque` ctor. We want to ignore the second branch because opaque
- // constants should not be inspected, but we don't want to ignore the current (third)
- // branch, as that would cause us to always conclude that such a branch is unreachable.
- // ```rust
- // #[derive(PartialEq)]
- // struct Foo(i32);
- // impl Eq for Foo {}
- // const FOO: Foo = Foo(42);
- //
- // match (Foo(0), true) {
- // (_, true) => {}
- // (FOO, true) => {}
- // (FOO, false) => {}
- // }
- // ```
- if is_its_own_ctor || pat.is_wildcard() {
- return Some(Fields::empty());
- } else {
- return None;
- }
- }
-
- let result = match *pat.kind {
- PatKind::AscribeUserType { .. } => bug!(), // Handled by `expand_pattern`
-
- PatKind::Binding { .. } | PatKind::Wild => Some(ctor_wild_subpatterns.clone()),
-
- PatKind::Variant { adt_def, variant_index, ref subpatterns, .. } => {
- let variant = &adt_def.variants[variant_index];
- if constructor != &Variant(variant.def_id) {
- return None;
- }
- Some(ctor_wild_subpatterns.replace_with_fieldpats(subpatterns))
- }
-
- PatKind::Leaf { ref subpatterns } => {
- Some(ctor_wild_subpatterns.replace_with_fieldpats(subpatterns))
- }
-
- PatKind::Deref { ref subpattern } => Some(Fields::from_single_pattern(subpattern)),
-
- PatKind::Constant { .. } | PatKind::Range { .. } => {
- match constructor {
- IntRange(ctor) => {
- let pat = IntRange::from_pat(cx.tcx, cx.param_env, pat)?;
- ctor.intersection(cx.tcx, &pat)?;
- // Constructor splitting should ensure that all intersections we encounter
- // are actually inclusions.
- assert!(ctor.is_subrange(&pat));
- }
- FloatRange(ctor_from, ctor_to, ctor_end) => {
- let (pat_from, pat_to, pat_end, ty) = match *pat.kind {
- PatKind::Constant { value } => (value, value, RangeEnd::Included, value.ty),
- PatKind::Range(PatRange { lo, hi, end }) => (lo, hi, end, lo.ty),
- _ => unreachable!(), // This is ensured by the branch we're in
- };
- let to = compare_const_vals(cx.tcx, ctor_to, pat_to, cx.param_env, ty)?;
- let from = compare_const_vals(cx.tcx, ctor_from, pat_from, cx.param_env, ty)?;
- let intersects = (from == Ordering::Greater || from == Ordering::Equal)
- && (to == Ordering::Less
- || (pat_end == *ctor_end && to == Ordering::Equal));
- if !intersects {
- return None;
- }
- }
- Str(ctor_value) => {
- let pat_value = match *pat.kind {
- PatKind::Constant { value } => value,
- _ => span_bug!(
- pat.span,
- "unexpected range pattern {:?} for constant value ctor",
- pat
- ),
- };
-
- // FIXME: there's probably a more direct way of comparing for equality
- if compare_const_vals(cx.tcx, ctor_value, pat_value, cx.param_env, pat.ty)?
- != Ordering::Equal
- {
- return None;
- }
- }
- _ => {
- // If we reach here, we must be trying to inspect an opaque constant. Thus we skip
- // the row.
- return None;
- }
- }
- Some(Fields::empty())
- }
-
- PatKind::Array { ref prefix, ref slice, ref suffix }
- | PatKind::Slice { ref prefix, ref slice, ref suffix } => match *constructor {
- Slice(_) => {
- // Number of subpatterns for this pattern
- let pat_len = prefix.len() + suffix.len();
- // Number of subpatterns for this constructor
- let arity = ctor_wild_subpatterns.len();
-
- if (slice.is_none() && arity != pat_len) || pat_len > arity {
- return None;
- }
-
- // Replace the prefix and the suffix with the given patterns, leaving wildcards in
- // the middle if there was a subslice pattern `..`.
- let prefix = prefix.iter().enumerate();
- let suffix = suffix.iter().enumerate().map(|(i, p)| (arity - suffix.len() + i, p));
- Some(ctor_wild_subpatterns.replace_fields_indexed(prefix.chain(suffix)))
- }
- _ => span_bug!(pat.span, "unexpected ctor {:?} for slice pat", constructor),
- },
-
- PatKind::Or { .. } => bug!("Or-pattern should have been expanded earlier on."),
- };
- debug!(
- "specialize({:#?}, {:#?}, {:#?}) = {:#?}",
- pat, constructor, ctor_wild_subpatterns, result
- );
-
- result
-}