1 //! Candidate selection. See the [rustc dev guide] for more information on how this works.
3 //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
5 use self::EvaluationResult::*;
6 use self::SelectionCandidate::*;
8 use super::coherence::{self, Conflict};
10 use super::project::normalize_with_depth_to;
12 use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
14 use super::DerivedObligationCause;
15 use super::Obligation;
16 use super::ObligationCauseCode;
18 use super::SelectionResult;
19 use super::TraitQueryMode;
20 use super::{Normalized, ProjectionCacheKey};
21 use super::{ObligationCause, PredicateObligation, TraitObligation};
22 use super::{Overflow, SelectionError, Unimplemented};
24 use crate::infer::{InferCtxt, InferOk, TypeFreshener};
25 use crate::traits::error_reporting::InferCtxtExt;
26 use crate::traits::project::ProjectionCacheKeyExt;
27 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
28 use rustc_data_structures::stack::ensure_sufficient_stack;
29 use rustc_errors::ErrorReported;
31 use rustc_hir::def_id::DefId;
32 use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
33 use rustc_middle::mir::interpret::ErrorHandled;
34 use rustc_middle::ty::fast_reject;
35 use rustc_middle::ty::relate::TypeRelation;
36 use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef};
37 use rustc_middle::ty::{
38 self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness,
40 use rustc_span::symbol::sym;
42 use std::cell::{Cell, RefCell};
44 use std::fmt::{self, Display};
48 pub use rustc_middle::traits::select::*;
50 mod candidate_assembly;
53 #[derive(Clone, Debug)]
54 pub enum IntercrateAmbiguityCause {
55 DownstreamCrate { trait_desc: String, self_desc: Option<String> },
56 UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
57 ReservationImpl { message: String },
60 impl IntercrateAmbiguityCause {
61 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
62 /// See #23980 for details.
63 pub fn add_intercrate_ambiguity_hint(&self, err: &mut rustc_errors::DiagnosticBuilder<'_>) {
64 err.note(&self.intercrate_ambiguity_hint());
67 pub fn intercrate_ambiguity_hint(&self) -> String {
69 &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
70 let self_desc = if let &Some(ref ty) = self_desc {
71 format!(" for type `{}`", ty)
75 format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
77 &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
78 let self_desc = if let &Some(ref ty) = self_desc {
79 format!(" for type `{}`", ty)
84 "upstream crates may add a new impl of trait `{}`{} \
89 &IntercrateAmbiguityCause::ReservationImpl { ref message } => message.clone(),
94 pub struct SelectionContext<'cx, 'tcx> {
95 infcx: &'cx InferCtxt<'cx, 'tcx>,
97 /// Freshener used specifically for entries on the obligation
98 /// stack. This ensures that all entries on the stack at one time
99 /// will have the same set of placeholder entries, which is
100 /// important for checking for trait bounds that recursively
101 /// require themselves.
102 freshener: TypeFreshener<'cx, 'tcx>,
104 /// If `true`, indicates that the evaluation should be conservative
105 /// and consider the possibility of types outside this crate.
106 /// This comes up primarily when resolving ambiguity. Imagine
107 /// there is some trait reference `$0: Bar` where `$0` is an
108 /// inference variable. If `intercrate` is true, then we can never
109 /// say for sure that this reference is not implemented, even if
110 /// there are *no impls at all for `Bar`*, because `$0` could be
111 /// bound to some type in a downstream crate that implements
112 /// `Bar`. This is the suitable mode for coherence. Elsewhere,
113 /// though, we set this to false, because we are only interested
114 /// in types that the user could actually have written --- in
115 /// other words, we consider `$0: Bar` to be unimplemented if
116 /// there is no type that the user could *actually name* that
117 /// would satisfy it. This avoids crippling inference, basically.
120 intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
122 /// Controls whether or not to filter out negative impls when selecting.
123 /// This is used in librustdoc to distinguish between the lack of an impl
124 /// and a negative impl
125 allow_negative_impls: bool,
127 /// The mode that trait queries run in, which informs our error handling
128 /// policy. In essence, canonicalized queries need their errors propagated
129 /// rather than immediately reported because we do not have accurate spans.
130 query_mode: TraitQueryMode,
133 // A stack that walks back up the stack frame.
134 struct TraitObligationStack<'prev, 'tcx> {
135 obligation: &'prev TraitObligation<'tcx>,
137 /// The trait ref from `obligation` but "freshened" with the
138 /// selection-context's freshener. Used to check for recursion.
139 fresh_trait_ref: ty::PolyTraitRef<'tcx>,
141 /// Starts out equal to `depth` -- if, during evaluation, we
142 /// encounter a cycle, then we will set this flag to the minimum
143 /// depth of that cycle for all participants in the cycle. These
144 /// participants will then forego caching their results. This is
145 /// not the most efficient solution, but it addresses #60010. The
146 /// problem we are trying to prevent:
148 /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
149 /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
150 /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
152 /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
153 /// is `EvaluatedToOk`; this is because they were only considered
154 /// ok on the premise that if `A: AutoTrait` held, but we indeed
155 /// encountered a problem (later on) with `A: AutoTrait`. So we
156 /// currently set a flag on the stack node for `B: AutoTrait` (as
157 /// well as the second instance of `A: AutoTrait`) to suppress
160 /// This is a simple, targeted fix. A more-performant fix requires
161 /// deeper changes, but would permit more caching: we could
162 /// basically defer caching until we have fully evaluated the
163 /// tree, and then cache the entire tree at once. In any case, the
164 /// performance impact here shouldn't be so horrible: every time
165 /// this is hit, we do cache at least one trait, so we only
166 /// evaluate each member of a cycle up to N times, where N is the
167 /// length of the cycle. This means the performance impact is
168 /// bounded and we shouldn't have any terrible worst-cases.
169 reached_depth: Cell<usize>,
171 previous: TraitObligationStackList<'prev, 'tcx>,
173 /// The number of parent frames plus one (thus, the topmost frame has depth 1).
176 /// The depth-first number of this node in the search graph -- a
177 /// pre-order index. Basically, a freshly incremented counter.
181 struct SelectionCandidateSet<'tcx> {
182 // A list of candidates that definitely apply to the current
183 // obligation (meaning: types unify).
184 vec: Vec<SelectionCandidate<'tcx>>,
186 // If `true`, then there were candidates that might or might
187 // not have applied, but we couldn't tell. This occurs when some
188 // of the input types are type variables, in which case there are
189 // various "builtin" rules that might or might not trigger.
193 #[derive(PartialEq, Eq, Debug, Clone)]
194 struct EvaluatedCandidate<'tcx> {
195 candidate: SelectionCandidate<'tcx>,
196 evaluation: EvaluationResult,
199 /// When does the builtin impl for `T: Trait` apply?
200 enum BuiltinImplConditions<'tcx> {
201 /// The impl is conditional on `T1, T2, ...: Trait`.
202 Where(ty::Binder<Vec<Ty<'tcx>>>),
203 /// There is no built-in impl. There may be some other
204 /// candidate (a where-clause or user-defined impl).
206 /// It is unknown whether there is an impl.
210 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
211 pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
214 freshener: infcx.freshener(),
216 intercrate_ambiguity_causes: None,
217 allow_negative_impls: false,
218 query_mode: TraitQueryMode::Standard,
222 pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
225 freshener: infcx.freshener(),
227 intercrate_ambiguity_causes: None,
228 allow_negative_impls: false,
229 query_mode: TraitQueryMode::Standard,
233 pub fn with_negative(
234 infcx: &'cx InferCtxt<'cx, 'tcx>,
235 allow_negative_impls: bool,
236 ) -> SelectionContext<'cx, 'tcx> {
237 debug!("with_negative({:?})", allow_negative_impls);
240 freshener: infcx.freshener(),
242 intercrate_ambiguity_causes: None,
243 allow_negative_impls,
244 query_mode: TraitQueryMode::Standard,
248 pub fn with_query_mode(
249 infcx: &'cx InferCtxt<'cx, 'tcx>,
250 query_mode: TraitQueryMode,
251 ) -> SelectionContext<'cx, 'tcx> {
252 debug!("with_query_mode({:?})", query_mode);
255 freshener: infcx.freshener(),
257 intercrate_ambiguity_causes: None,
258 allow_negative_impls: false,
263 /// Enables tracking of intercrate ambiguity causes. These are
264 /// used in coherence to give improved diagnostics. We don't do
265 /// this until we detect a coherence error because it can lead to
266 /// false overflow results (#47139) and because it costs
267 /// computation time.
268 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
269 assert!(self.intercrate);
270 assert!(self.intercrate_ambiguity_causes.is_none());
271 self.intercrate_ambiguity_causes = Some(vec![]);
272 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
275 /// Gets the intercrate ambiguity causes collected since tracking
276 /// was enabled and disables tracking at the same time. If
277 /// tracking is not enabled, just returns an empty vector.
278 pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
279 assert!(self.intercrate);
280 self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
283 pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
287 pub fn tcx(&self) -> TyCtxt<'tcx> {
291 pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
295 ///////////////////////////////////////////////////////////////////////////
298 // The selection phase tries to identify *how* an obligation will
299 // be resolved. For example, it will identify which impl or
300 // parameter bound is to be used. The process can be inconclusive
301 // if the self type in the obligation is not fully inferred. Selection
302 // can result in an error in one of two ways:
304 // 1. If no applicable impl or parameter bound can be found.
305 // 2. If the output type parameters in the obligation do not match
306 // those specified by the impl/bound. For example, if the obligation
307 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
308 // `impl<T> Iterable<T> for Vec<T>`, then an error would result.
310 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
311 /// type environment by performing unification.
314 obligation: &TraitObligation<'tcx>,
315 ) -> SelectionResult<'tcx, Selection<'tcx>> {
316 debug!("select({:?})", obligation);
317 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
319 let pec = &ProvisionalEvaluationCache::default();
320 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
322 let candidate = match self.candidate_from_obligation(&stack) {
323 Err(SelectionError::Overflow) => {
324 // In standard mode, overflow must have been caught and reported
326 assert!(self.query_mode == TraitQueryMode::Canonical);
327 return Err(SelectionError::Overflow);
335 Ok(Some(candidate)) => candidate,
338 match self.confirm_candidate(obligation, candidate) {
339 Err(SelectionError::Overflow) => {
340 assert!(self.query_mode == TraitQueryMode::Canonical);
341 Err(SelectionError::Overflow)
344 Ok(candidate) => Ok(Some(candidate)),
348 ///////////////////////////////////////////////////////////////////////////
351 // Tests whether an obligation can be selected or whether an impl
352 // can be applied to particular types. It skips the "confirmation"
353 // step and hence completely ignores output type parameters.
355 // The result is "true" if the obligation *may* hold and "false" if
356 // we can be sure it does not.
358 /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
359 pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
360 debug!("predicate_may_hold_fatal({:?})", obligation);
362 // This fatal query is a stopgap that should only be used in standard mode,
363 // where we do not expect overflow to be propagated.
364 assert!(self.query_mode == TraitQueryMode::Standard);
366 self.evaluate_root_obligation(obligation)
367 .expect("Overflow should be caught earlier in standard query mode")
371 /// Evaluates whether the obligation `obligation` can be satisfied
372 /// and returns an `EvaluationResult`. This is meant for the
374 pub fn evaluate_root_obligation(
376 obligation: &PredicateObligation<'tcx>,
377 ) -> Result<EvaluationResult, OverflowError> {
378 self.evaluation_probe(|this| {
379 this.evaluate_predicate_recursively(
380 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
388 op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
389 ) -> Result<EvaluationResult, OverflowError> {
390 self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
391 let result = op(self)?;
393 match self.infcx.leak_check(true, snapshot) {
395 Err(_) => return Ok(EvaluatedToErr),
398 match self.infcx.region_constraints_added_in_snapshot(snapshot) {
400 Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
405 /// Evaluates the predicates in `predicates` recursively. Note that
406 /// this applies projections in the predicates, and therefore
407 /// is run within an inference probe.
408 fn evaluate_predicates_recursively<'o, I>(
410 stack: TraitObligationStackList<'o, 'tcx>,
412 ) -> Result<EvaluationResult, OverflowError>
414 I: IntoIterator<Item = PredicateObligation<'tcx>>,
416 let mut result = EvaluatedToOk;
417 for obligation in predicates {
418 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
419 debug!("evaluate_predicate_recursively({:?}) = {:?}", obligation, eval);
420 if let EvaluatedToErr = eval {
421 // fast-path - EvaluatedToErr is the top of the lattice,
422 // so we don't need to look at the other predicates.
423 return Ok(EvaluatedToErr);
425 result = cmp::max(result, eval);
431 fn evaluate_predicate_recursively<'o>(
433 previous_stack: TraitObligationStackList<'o, 'tcx>,
434 obligation: PredicateObligation<'tcx>,
435 ) -> Result<EvaluationResult, OverflowError> {
437 "evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})",
438 previous_stack.head(),
442 // `previous_stack` stores a `TraitObligation`, while `obligation` is
443 // a `PredicateObligation`. These are distinct types, so we can't
444 // use any `Option` combinator method that would force them to be
446 match previous_stack.head() {
447 Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
448 None => self.check_recursion_limit(&obligation, &obligation)?,
451 match obligation.predicate.skip_binders() {
452 ty::PredicateAtom::Trait(t, _) => {
453 let t = ty::Binder::bind(t);
454 debug_assert!(!t.has_escaping_bound_vars());
455 let obligation = obligation.with(t);
456 self.evaluate_trait_predicate_recursively(previous_stack, obligation)
459 ty::PredicateAtom::Subtype(p) => {
460 let p = ty::Binder::bind(p);
461 // Does this code ever run?
462 match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
463 Some(Ok(InferOk { mut obligations, .. })) => {
464 self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
465 self.evaluate_predicates_recursively(
467 obligations.into_iter(),
470 Some(Err(_)) => Ok(EvaluatedToErr),
471 None => Ok(EvaluatedToAmbig),
475 ty::PredicateAtom::WellFormed(arg) => match wf::obligations(
477 obligation.param_env,
478 obligation.cause.body_id,
480 obligation.cause.span,
482 Some(mut obligations) => {
483 self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
484 self.evaluate_predicates_recursively(previous_stack, obligations.into_iter())
486 None => Ok(EvaluatedToAmbig),
489 ty::PredicateAtom::TypeOutlives(..) | ty::PredicateAtom::RegionOutlives(..) => {
490 // We do not consider region relationships when evaluating trait matches.
491 Ok(EvaluatedToOkModuloRegions)
494 ty::PredicateAtom::ObjectSafe(trait_def_id) => {
495 if self.tcx().is_object_safe(trait_def_id) {
502 ty::PredicateAtom::Projection(data) => {
503 let data = ty::Binder::bind(data);
504 let project_obligation = obligation.with(data);
505 match project::poly_project_and_unify_type(self, &project_obligation) {
506 Ok(Some(mut subobligations)) => {
507 self.add_depth(subobligations.iter_mut(), obligation.recursion_depth);
508 let result = self.evaluate_predicates_recursively(
510 subobligations.into_iter(),
513 ProjectionCacheKey::from_poly_projection_predicate(self, data)
515 self.infcx.inner.borrow_mut().projection_cache().complete(key);
519 Ok(None) => Ok(EvaluatedToAmbig),
520 Err(_) => Ok(EvaluatedToErr),
524 ty::PredicateAtom::ClosureKind(_, closure_substs, kind) => {
525 match self.infcx.closure_kind(closure_substs) {
526 Some(closure_kind) => {
527 if closure_kind.extends(kind) {
533 None => Ok(EvaluatedToAmbig),
537 ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
538 match self.tcx().const_eval_resolve(
539 obligation.param_env,
545 Ok(_) => Ok(EvaluatedToOk),
546 Err(ErrorHandled::TooGeneric) => Ok(EvaluatedToAmbig),
547 Err(_) => Ok(EvaluatedToErr),
551 ty::PredicateAtom::ConstEquate(c1, c2) => {
552 debug!("evaluate_predicate_recursively: equating consts c1={:?} c2={:?}", c1, c2);
554 let evaluate = |c: &'tcx ty::Const<'tcx>| {
555 if let ty::ConstKind::Unevaluated(def, substs, promoted) = c.val {
558 obligation.param_env,
562 Some(obligation.cause.span),
564 .map(|val| ty::Const::from_value(self.tcx(), val, c.ty))
570 match (evaluate(c1), evaluate(c2)) {
571 (Ok(c1), Ok(c2)) => {
572 match self.infcx().at(&obligation.cause, obligation.param_env).eq(c1, c2) {
573 Ok(_) => Ok(EvaluatedToOk),
574 Err(_) => Ok(EvaluatedToErr),
577 (Err(ErrorHandled::Reported(ErrorReported)), _)
578 | (_, Err(ErrorHandled::Reported(ErrorReported))) => Ok(EvaluatedToErr),
579 (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => span_bug!(
580 obligation.cause.span(self.tcx()),
581 "ConstEquate: const_eval_resolve returned an unexpected error"
583 (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
591 fn evaluate_trait_predicate_recursively<'o>(
593 previous_stack: TraitObligationStackList<'o, 'tcx>,
594 mut obligation: TraitObligation<'tcx>,
595 ) -> Result<EvaluationResult, OverflowError> {
596 debug!("evaluate_trait_predicate_recursively({:?})", obligation);
599 && obligation.is_global()
600 && obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst())
602 // If a param env has no global bounds, global obligations do not
603 // depend on its particular value in order to work, so we can clear
604 // out the param env and get better caching.
605 debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation);
606 obligation.param_env = obligation.param_env.without_caller_bounds();
609 let stack = self.push_stack(previous_stack, &obligation);
610 let fresh_trait_ref = stack.fresh_trait_ref;
611 if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
612 debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
616 if let Some(result) = stack.cache().get_provisional(fresh_trait_ref) {
617 debug!("PROVISIONAL CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
618 stack.update_reached_depth(stack.cache().current_reached_depth());
622 // Check if this is a match for something already on the
623 // stack. If so, we don't want to insert the result into the
624 // main cache (it is cycle dependent) nor the provisional
625 // cache (which is meant for things that have completed but
626 // for a "backedge" -- this result *is* the backedge).
627 if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
628 return Ok(cycle_result);
631 let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
632 let result = result?;
634 if !result.must_apply_modulo_regions() {
635 stack.cache().on_failure(stack.dfn);
638 let reached_depth = stack.reached_depth.get();
639 if reached_depth >= stack.depth {
640 debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result);
641 self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);
643 stack.cache().on_completion(stack.depth, |fresh_trait_ref, provisional_result| {
644 self.insert_evaluation_cache(
645 obligation.param_env,
648 provisional_result.max(result),
652 debug!("PROVISIONAL: {:?}={:?}", fresh_trait_ref, result);
654 "evaluate_trait_predicate_recursively: caching provisionally because {:?} \
655 is a cycle participant (at depth {}, reached depth {})",
656 fresh_trait_ref, stack.depth, reached_depth,
659 stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_ref, result);
665 /// If there is any previous entry on the stack that precisely
666 /// matches this obligation, then we can assume that the
667 /// obligation is satisfied for now (still all other conditions
668 /// must be met of course). One obvious case this comes up is
669 /// marker traits like `Send`. Think of a linked list:
671 /// struct List<T> { data: T, next: Option<Box<List<T>>> }
673 /// `Box<List<T>>` will be `Send` if `T` is `Send` and
674 /// `Option<Box<List<T>>>` is `Send`, and in turn
675 /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
678 /// Note that we do this comparison using the `fresh_trait_ref`
679 /// fields. Because these have all been freshened using
680 /// `self.freshener`, we can be sure that (a) this will not
681 /// affect the inferencer state and (b) that if we see two
682 /// fresh regions with the same index, they refer to the same
683 /// unbound type variable.
684 fn check_evaluation_cycle(
686 stack: &TraitObligationStack<'_, 'tcx>,
687 ) -> Option<EvaluationResult> {
688 if let Some(cycle_depth) = stack
690 .skip(1) // Skip top-most frame.
692 stack.obligation.param_env == prev.obligation.param_env
693 && stack.fresh_trait_ref == prev.fresh_trait_ref
695 .map(|stack| stack.depth)
698 "evaluate_stack({:?}) --> recursive at depth {}",
699 stack.fresh_trait_ref, cycle_depth,
702 // If we have a stack like `A B C D E A`, where the top of
703 // the stack is the final `A`, then this will iterate over
704 // `A, E, D, C, B` -- i.e., all the participants apart
705 // from the cycle head. We mark them as participating in a
706 // cycle. This suppresses caching for those nodes. See
707 // `in_cycle` field for more details.
708 stack.update_reached_depth(cycle_depth);
710 // Subtle: when checking for a coinductive cycle, we do
711 // not compare using the "freshened trait refs" (which
712 // have erased regions) but rather the fully explicit
713 // trait refs. This is important because it's only a cycle
714 // if the regions match exactly.
715 let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
716 let tcx = self.tcx();
718 cycle.map(|stack| stack.obligation.predicate.without_const().to_predicate(tcx));
719 if self.coinductive_match(cycle) {
720 debug!("evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref);
723 debug!("evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref);
724 Some(EvaluatedToRecur)
731 fn evaluate_stack<'o>(
733 stack: &TraitObligationStack<'o, 'tcx>,
734 ) -> Result<EvaluationResult, OverflowError> {
735 // In intercrate mode, whenever any of the generics are unbound,
736 // there can always be an impl. Even if there are no impls in
737 // this crate, perhaps the type would be unified with
738 // something from another crate that does provide an impl.
740 // In intra mode, we must still be conservative. The reason is
741 // that we want to avoid cycles. Imagine an impl like:
743 // impl<T:Eq> Eq for Vec<T>
745 // and a trait reference like `$0 : Eq` where `$0` is an
746 // unbound variable. When we evaluate this trait-reference, we
747 // will unify `$0` with `Vec<$1>` (for some fresh variable
748 // `$1`), on the condition that `$1 : Eq`. We will then wind
749 // up with many candidates (since there are other `Eq` impls
750 // that apply) and try to winnow things down. This results in
751 // a recursive evaluation of whether `$1 : Eq` holds -- as you can
752 // imagine, this is just where we started. To avoid that, we
753 // check for unbound variables and return an ambiguous (hence possible)
754 // match if we've seen this trait before.
756 // This suffices to allow chains like `FnMut` implemented in
757 // terms of `Fn` etc, but we could probably make this more
759 let unbound_input_types =
760 stack.fresh_trait_ref.skip_binder().substs.types().any(|ty| ty.is_fresh());
761 // This check was an imperfect workaround for a bug in the old
762 // intercrate mode; it should be removed when that goes away.
763 if unbound_input_types && self.intercrate {
765 "evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
766 stack.fresh_trait_ref
768 // Heuristics: show the diagnostics when there are no candidates in crate.
769 if self.intercrate_ambiguity_causes.is_some() {
770 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
771 if let Ok(candidate_set) = self.assemble_candidates(stack) {
772 if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
773 let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
774 let self_ty = trait_ref.self_ty();
775 let cause = IntercrateAmbiguityCause::DownstreamCrate {
776 trait_desc: trait_ref.print_only_trait_path().to_string(),
777 self_desc: if self_ty.has_concrete_skeleton() {
778 Some(self_ty.to_string())
783 debug!("evaluate_stack: pushing cause = {:?}", cause);
784 self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
788 return Ok(EvaluatedToAmbig);
790 if unbound_input_types
791 && stack.iter().skip(1).any(|prev| {
792 stack.obligation.param_env == prev.obligation.param_env
793 && self.match_fresh_trait_refs(
794 stack.fresh_trait_ref,
795 prev.fresh_trait_ref,
796 prev.obligation.param_env,
801 "evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
802 stack.fresh_trait_ref
804 return Ok(EvaluatedToUnknown);
807 match self.candidate_from_obligation(stack) {
808 Ok(Some(c)) => self.evaluate_candidate(stack, &c),
809 Ok(None) => Ok(EvaluatedToAmbig),
810 Err(Overflow) => Err(OverflowError),
811 Err(..) => Ok(EvaluatedToErr),
815 /// For defaulted traits, we use a co-inductive strategy to solve, so
816 /// that recursion is ok. This routine returns `true` if the top of the
817 /// stack (`cycle[0]`):
819 /// - is a defaulted trait,
820 /// - it also appears in the backtrace at some position `X`,
821 /// - all the predicates at positions `X..` between `X` and the top are
822 /// also defaulted traits.
823 pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
825 I: Iterator<Item = ty::Predicate<'tcx>>,
827 let mut cycle = cycle;
828 cycle.all(|predicate| self.coinductive_predicate(predicate))
831 fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
832 let result = match predicate.skip_binders() {
833 ty::PredicateAtom::Trait(ref data, _) => self.tcx().trait_is_auto(data.def_id()),
836 debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
840 /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
841 /// obligations are met. Returns whether `candidate` remains viable after this further
843 fn evaluate_candidate<'o>(
845 stack: &TraitObligationStack<'o, 'tcx>,
846 candidate: &SelectionCandidate<'tcx>,
847 ) -> Result<EvaluationResult, OverflowError> {
849 "evaluate_candidate: depth={} candidate={:?}",
850 stack.obligation.recursion_depth, candidate
852 let result = self.evaluation_probe(|this| {
853 let candidate = (*candidate).clone();
854 match this.confirm_candidate(stack.obligation, candidate) {
855 Ok(selection) => this.evaluate_predicates_recursively(
857 selection.nested_obligations().into_iter(),
859 Err(..) => Ok(EvaluatedToErr),
863 "evaluate_candidate: depth={} result={:?}",
864 stack.obligation.recursion_depth, result
869 fn check_evaluation_cache(
871 param_env: ty::ParamEnv<'tcx>,
872 trait_ref: ty::PolyTraitRef<'tcx>,
873 ) -> Option<EvaluationResult> {
874 let tcx = self.tcx();
875 if self.can_use_global_caches(param_env) {
876 if let Some(res) = tcx.evaluation_cache.get(¶m_env.and(trait_ref), tcx) {
880 self.infcx.evaluation_cache.get(¶m_env.and(trait_ref), tcx)
883 fn insert_evaluation_cache(
885 param_env: ty::ParamEnv<'tcx>,
886 trait_ref: ty::PolyTraitRef<'tcx>,
887 dep_node: DepNodeIndex,
888 result: EvaluationResult,
890 // Avoid caching results that depend on more than just the trait-ref
891 // - the stack can create recursion.
892 if result.is_stack_dependent() {
896 if self.can_use_global_caches(param_env) {
897 if !trait_ref.needs_infer() {
899 "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global",
902 // This may overwrite the cache with the same value
903 // FIXME: Due to #50507 this overwrites the different values
904 // This should be changed to use HashMapExt::insert_same
905 // when that is fixed
906 self.tcx().evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
911 debug!("insert_evaluation_cache(trait_ref={:?}, candidate={:?})", trait_ref, result,);
912 self.infcx.evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
915 /// For various reasons, it's possible for a subobligation
916 /// to have a *lower* recursion_depth than the obligation used to create it.
917 /// Projection sub-obligations may be returned from the projection cache,
918 /// which results in obligations with an 'old' `recursion_depth`.
919 /// Additionally, methods like `wf::obligations` and
920 /// `InferCtxt.subtype_predicate` produce subobligations without
921 /// taking in a 'parent' depth, causing the generated subobligations
922 /// to have a `recursion_depth` of `0`.
924 /// To ensure that obligation_depth never decreases, we force all subobligations
925 /// to have at least the depth of the original obligation.
926 fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(
931 it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
934 /// Checks that the recursion limit has not been exceeded.
936 /// The weird return type of this function allows it to be used with the `try` (`?`)
937 /// operator within certain functions.
938 fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
940 obligation: &Obligation<'tcx, T>,
941 error_obligation: &Obligation<'tcx, V>,
942 ) -> Result<(), OverflowError> {
943 if !self.infcx.tcx.sess.recursion_limit().value_within_limit(obligation.recursion_depth) {
944 match self.query_mode {
945 TraitQueryMode::Standard => {
946 self.infcx().report_overflow_error(error_obligation, true);
948 TraitQueryMode::Canonical => {
949 return Err(OverflowError);
956 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
958 OP: FnOnce(&mut Self) -> R,
960 let (result, dep_node) =
961 self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || op(self));
962 self.tcx().dep_graph.read_index(dep_node);
966 // Treat negative impls as unimplemented, and reservation impls as ambiguity.
967 fn filter_negative_and_reservation_impls(
969 candidate: SelectionCandidate<'tcx>,
970 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
// Only `ImplCandidate`s need filtering; every other candidate kind
// passes through unchanged.
971 if let ImplCandidate(def_id) = candidate {
972 let tcx = self.tcx();
973 match tcx.impl_polarity(def_id) {
974 ty::ImplPolarity::Negative if !self.allow_negative_impls => {
975 return Err(Unimplemented);
977 ty::ImplPolarity::Reservation => {
978 if let Some(intercrate_ambiguity_clauses) =
979 &mut self.intercrate_ambiguity_causes
// Reservation impls signal deliberate ambiguity: surface the
// `#[rustc_reservation_impl = "..."]` message to the user.
981 let attrs = tcx.get_attrs(def_id);
982 let attr = tcx.sess.find_by_name(&attrs, sym::rustc_reservation_impl);
983 let value = attr.and_then(|a| a.value_str());
984 if let Some(value) = value {
986 "filter_negative_and_reservation_impls: \
987 reservation impl ambiguity on {:?}",
990 intercrate_ambiguity_clauses.push(
991 IntercrateAmbiguityCause::ReservationImpl {
992 message: value.to_string(),
1005 fn candidate_from_obligation_no_cache<'o>(
1007 stack: &TraitObligationStack<'o, 'tcx>,
1008 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
// Selection proper (cache already missed): assemble candidates, winnow
// by evaluation, drop duplicates/specialized impls, and return a single
// candidate, ambiguity (`Ok(None)`), or an error.
1009 if let Some(conflict) = self.is_knowable(stack) {
1010 debug!("coherence stage: not knowable");
1011 if self.intercrate_ambiguity_causes.is_some() {
1012 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
1013 // Heuristics: show the diagnostics when there are no candidates in crate.
1014 if let Ok(candidate_set) = self.assemble_candidates(stack) {
1015 let mut no_candidates_apply = true;
1017 for c in candidate_set.vec.iter() {
1018 if self.evaluate_candidate(stack, &c)?.may_apply() {
1019 no_candidates_apply = false;
1024 if !candidate_set.ambiguous && no_candidates_apply {
1025 let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
1026 let self_ty = trait_ref.self_ty();
1027 let trait_desc = trait_ref.print_only_trait_path().to_string();
1028 let self_desc = if self_ty.has_concrete_skeleton() {
1029 Some(self_ty.to_string())
1033 let cause = if let Conflict::Upstream = conflict {
1034 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
1036 IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
1038 debug!("evaluate_stack: pushing cause = {:?}", cause);
1039 self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
1046 let candidate_set = self.assemble_candidates(stack)?;
1048 if candidate_set.ambiguous {
1049 debug!("candidate set contains ambig");
1053 let mut candidates = candidate_set.vec;
1055 debug!("assembled {} candidates for {:?}: {:?}", candidates.len(), stack, candidates);
1057 // At this point, we know that each of the entries in the
1058 // candidate set is *individually* applicable. Now we have to
1059 // figure out if they contain mutual incompatibilities. This
1060 // frequently arises if we have an unconstrained input type --
1061 // for example, we are looking for `$0: Eq` where `$0` is some
1062 // unconstrained type variable. In that case, we'll get a
1063 // candidate which assumes $0 == int, one that assumes `$0 ==
1064 // usize`, etc. This spells an ambiguity.
1066 // If there is more than one candidate, first winnow them down
1067 // by considering extra conditions (nested obligations and so
1068 // forth). We don't winnow if there is exactly one
1069 // candidate. This is a relatively minor distinction but it
1070 // can lead to better inference and error-reporting. An
1071 // example would be if there was an impl:
1073 // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
1075 // and we were to see some code `foo.push_clone()` where `foo`
1076 // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
1077 // we were to winnow, we'd wind up with zero candidates.
1078 // Instead, we select the right impl now but report "`Bar` does
1079 // not implement `Clone`".
1080 if candidates.len() == 1 {
1081 return self.filter_negative_and_reservation_impls(candidates.pop().unwrap());
1084 // Winnow, but record the exact outcome of evaluation, which
1085 // is needed for specialization. Propagate overflow if it occurs.
1086 let mut candidates = candidates
1088 .map(|c| match self.evaluate_candidate(stack, &c) {
1089 Ok(eval) if eval.may_apply() => {
1090 Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
1093 Err(OverflowError) => Err(Overflow),
1095 .flat_map(Result::transpose)
1096 .collect::<Result<Vec<_>, _>>()?;
1098 debug!("winnowed to {} candidates for {:?}: {:?}", candidates.len(), stack, candidates);
1100 let needs_infer = stack.obligation.predicate.needs_infer();
1102 // If there are STILL multiple candidates, we can further
1103 // reduce the list by dropping duplicates -- including
1104 // resolving specializations.
1105 if candidates.len() > 1 {
1107 while i < candidates.len() {
1108 let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| {
1109 self.candidate_should_be_dropped_in_favor_of(
1116 debug!("Dropping candidate #{}/{}: {:?}", i, candidates.len(), candidates[i]);
// `swap_remove` keeps this O(n^2) pass cheap; candidate order is
// not semantically meaningful at this point.
1117 candidates.swap_remove(i);
1119 debug!("Retaining candidate #{}/{}: {:?}", i, candidates.len(), candidates[i]);
1122 // If there are *STILL* multiple candidates, give up
1123 // and report ambiguity.
1125 debug!("multiple matches, ambig");
1132 // If there are *NO* candidates, then there are no impls --
1133 // that we know of, anyway. Note that in the case where there
1134 // are unbound type variables within the obligation, it might
1135 // be the case that you could still satisfy the obligation
1136 // from another crate by instantiating the type variables with
1137 // a type from another crate that does have an impl. This case
1138 // is checked for in `evaluate_stack` (and hence users
1139 // who might care about this case, like coherence, should use
1141 if candidates.is_empty() {
1142 // If there's an error type, 'downgrade' our result from
1143 // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid
1144 // emitting additional spurious errors, since we're guaranteed
1145 // to have emitted at least one.
1146 if stack.obligation.references_error() {
1147 debug!("no results for error type, treating as ambiguous");
1150 return Err(Unimplemented);
1153 // Just one candidate left.
1154 self.filter_negative_and_reservation_impls(candidates.pop().unwrap().candidate)
1157 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
1158 debug!("is_knowable(intercrate={:?})", self.intercrate);
// Knowability is only a question during coherence (intercrate) checking.
1160 if !self.intercrate {
1164 let obligation = &stack.obligation;
1165 let predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
1167 // Okay to skip binder because of the nature of the
1168 // trait-ref-is-knowable check, which does not care about
1170 let trait_ref = predicate.skip_binder().trait_ref;
1172 coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
1175 /// Returns `true` if the global caches can be used.
1176 /// Do note that if the type itself is not in the
1177 /// global tcx, the local caches will be used.
/// Consulted by `check_candidate_cache` and `insert_candidate_cache`.
1178 fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
1179 // If there are any inference variables in the `ParamEnv`, then we
1180 // always use a cache local to this particular scope. Otherwise, we
1181 // switch to a global cache.
1182 if param_env.needs_infer() {
1186 // Avoid using the master cache during coherence and just rely
1187 // on the local cache. This effectively disables caching
1188 // during coherence. It is really just a simplification to
1189 // avoid us having to fear that coherence results "pollute"
1190 // the master cache. Since coherence executes pretty quickly,
1191 // it's not worth going to more trouble to increase the
1192 // hit-rate, I don't think.
1193 if self.intercrate {
1197 // Otherwise, we can use the global cache.
1201 fn check_candidate_cache(
1203 param_env: ty::ParamEnv<'tcx>,
1204 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1205 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1206 let tcx = self.tcx();
1207 let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
1208 if self.can_use_global_caches(param_env) {
1209 if let Some(res) = tcx.selection_cache.get(¶m_env.and(*trait_ref), tcx) {
1213 self.infcx.selection_cache.get(¶m_env.and(*trait_ref), tcx)
1216 /// Determines whether we can safely cache the result
1217 /// of selecting an obligation. This is almost always `true`,
1218 /// except when dealing with certain `ParamCandidate`s.
1220 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1221 /// since it was usually produced directly from a `DefId`. However, in
1222 /// certain cases (currently only librustdoc's blanket impl finder),
1223 /// a `ParamEnv` may be explicitly constructed with inference types.
1224 /// When this is the case, we do *not* want to cache the resulting selection
1225 /// candidate. This is due to the fact that it might not always be possible
1226 /// to equate the obligation's trait ref and the candidate's trait ref,
1227 /// if more constraints end up getting added to an inference variable.
1229 /// Because of this, we always want to re-run the full selection
1230 /// process for our obligation the next time we see it, since
1231 /// we might end up picking a different `SelectionCandidate` (or none at all).
1232 fn can_cache_candidate(
1234 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
// Only `ParamCandidate`s containing inference variables are uncacheable.
1237 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(),
/// Stores `candidate` in the selection caches: in the global (per-`tcx`)
/// cache when `can_use_global_caches` permits and the entry is free of
/// inference variables, otherwise in the infcx-local cache.
1242 fn insert_candidate_cache(
1244 param_env: ty::ParamEnv<'tcx>,
1245 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1246 dep_node: DepNodeIndex,
1247 candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1249 let tcx = self.tcx();
1250 let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;
// Bail out entirely for results that must not be cached (see
// `can_cache_candidate`).
1252 if !self.can_cache_candidate(&candidate) {
1254 "insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
1255 candidate is not cacheable",
1256 trait_ref, candidate
1261 if self.can_use_global_caches(param_env) {
1262 if let Err(Overflow) = candidate {
1263 // Don't cache overflow globally; we only produce this in certain modes.
1264 } else if !trait_ref.needs_infer() {
1265 if !candidate.needs_infer() {
1267 "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
1268 trait_ref, candidate,
1270 // This may overwrite the cache with the same value.
1271 tcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
1278 "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
1279 trait_ref, candidate,
1281 self.infcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
/// For a projection/opaque self type, searches the bounds declared on the
/// associated-type/opaque-type definition for one that matches the
/// obligation's trait ref.
1284 fn match_projection_obligation_against_definition_bounds(
1286 obligation: &TraitObligation<'tcx>,
1288 let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
1289 let (placeholder_trait_predicate, _) =
1290 self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate);
1292 "match_projection_obligation_against_definition_bounds: \
1293 placeholder_trait_predicate={:?}",
1294 placeholder_trait_predicate,
1297 let tcx = self.infcx.tcx;
// The self type must be a projection or an opaque type; anything else
// indicates a caller bug (see the span_bug message below).
1298 let predicates = match placeholder_trait_predicate.trait_ref.self_ty().kind {
1299 ty::Projection(ref data) => {
1300 tcx.projection_predicates(data.item_def_id).subst(tcx, data.substs)
1302 ty::Opaque(def_id, substs) => tcx.projection_predicates(def_id).subst(tcx, substs),
1305 obligation.cause.span,
1306 "match_projection_obligation_against_definition_bounds() called \
1307 but self-ty is not a projection: {:?}",
1308 placeholder_trait_predicate.trait_ref.self_ty()
// Probe each candidate bound: side effects of a failed match must not
// leak into the inference context.
1313 let matching_bound = predicates.iter().find_map(|bound| {
1314 if let ty::PredicateAtom::Trait(pred, _) = bound.skip_binders() {
1315 let bound = ty::Binder::bind(pred.trait_ref);
1316 if self.infcx.probe(|_| {
1317 self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref)
1326 "match_projection_obligation_against_definition_bounds: \
1327 matching_bound={:?}",
1330 match matching_bound {
1333 // Repeat the successful match, if any, this time outside of a probe.
1335 self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref);
/// Attempts to unify `trait_bound` with the placeholder trait ref via a
/// `sup` relation; success means the bound satisfies the obligation.
1343 fn match_projection(
1345 obligation: &TraitObligation<'tcx>,
1346 trait_bound: ty::PolyTraitRef<'tcx>,
1347 placeholder_trait_ref: ty::TraitRef<'tcx>,
// Placeholders were substituted for bound vars by the caller, so no
// escaping bound vars should remain.
1349 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1351 .at(&obligation.cause, obligation.param_env)
1352 .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
/// Evaluates whether `where_clause_trait_ref` satisfies the obligation at
/// the top of `stack`, inside an evaluation probe so inference side
/// effects are rolled back.
1356 fn evaluate_where_clause<'o>(
1358 stack: &TraitObligationStack<'o, 'tcx>,
1359 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1360 ) -> Result<EvaluationResult, OverflowError> {
1361 self.evaluation_probe(|this| {
1362 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1363 Ok(obligations) => {
// The where-clause matched; its nested obligations decide the result.
1364 this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
1366 Err(()) => Ok(EvaluatedToErr),
1371 ///////////////////////////////////////////////////////////////////////////
1374 // Winnowing is the process of attempting to resolve ambiguity by
1375 // probing further. During the winnowing process, we unify all
1376 // type variables and then we also attempt to evaluate recursive
1377 // bounds to see if they are satisfied.
1379 /// Returns `true` if `victim` should be dropped in favor of
1380 /// `other`. Generally speaking we will drop duplicate
1381 /// candidates and prefer where-clause candidates.
1383 /// See the comment for "SelectionCandidate" for more details.
1384 fn candidate_should_be_dropped_in_favor_of(
1386 victim: &EvaluatedCandidate<'tcx>,
1387 other: &EvaluatedCandidate<'tcx>,
// Identical candidates are trivially duplicates.
1390 if victim.candidate == other.candidate {
1394 // Check if a bound would previously have been removed when normalizing
1395 // the param_env so that it can be given the lowest priority. See
1396 // #50825 for the motivation for this.
1398 |cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
1400 // (*) Prefer `BuiltinCandidate { has_nested: false }` and `DiscriminantKindCandidate`
1401 // to anything else.
1403 // This is a fix for #53123 and prevents winnowing from accidentally extending the
1404 // lifetime of a variable.
1405 match other.candidate {
1407 BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => true,
1408 ParamCandidate(ref cand) => match victim.candidate {
1409 AutoImplCandidate(..) => {
1411 "default implementations shouldn't be recorded \
1412 when there are other valid candidates"
1416 BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
1419 | GeneratorCandidate
1420 | FnPointerCandidate
1421 | BuiltinObjectCandidate
1422 | BuiltinUnsizeCandidate
1423 | BuiltinCandidate { .. }
1424 | TraitAliasCandidate(..) => {
1425 // Global bounds from the where clause should be ignored
1426 // here (see issue #50825). Otherwise, we have a where
1427 // clause so don't go around looking for impls.
1430 ObjectCandidate | ProjectionCandidate => {
1431 // Arbitrarily give param candidates priority
1432 // over projection and object candidates.
1435 ParamCandidate(..) => false,
1437 ObjectCandidate | ProjectionCandidate => match victim.candidate {
1438 AutoImplCandidate(..) => {
1440 "default implementations shouldn't be recorded \
1441 when there are other valid candidates"
1445 BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
1448 | GeneratorCandidate
1449 | FnPointerCandidate
1450 | BuiltinObjectCandidate
1451 | BuiltinUnsizeCandidate
1452 | BuiltinCandidate { .. }
1453 | TraitAliasCandidate(..) => true,
1454 ObjectCandidate | ProjectionCandidate => {
1455 // Arbitrarily give param candidates priority
1456 // over projection and object candidates.
1459 ParamCandidate(ref cand) => is_global(cand),
1461 ImplCandidate(other_def) => {
1462 // See if we can toss out `victim` based on specialization.
1463 // This requires us to know *for sure* that the `other` impl applies
1464 // i.e., `EvaluatedToOk`.
1465 if other.evaluation.must_apply_modulo_regions() {
1466 match victim.candidate {
1467 ImplCandidate(victim_def) => {
1468 let tcx = self.tcx();
1469 if tcx.specializes((other_def, victim_def)) {
1472 return match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
1473 Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
1474 // Subtle: If the predicate we are evaluating has inference
1475 // variables, do *not* allow discarding candidates due to
1476 // marker trait impls.
1478 // Without this restriction, we could end up accidentally
1479 // constraining inference variables based on an arbitrarily
1480 // chosen trait impl.
1482 // Imagine we have the following code:
1485 // #[marker] trait MyTrait {}
1486 // impl MyTrait for u8 {}
1487 // impl MyTrait for bool {}
1490 // And we are evaluating the predicate `<_#0t as MyTrait>`.
1492 // During selection, we will end up with one candidate for each
1493 // impl of `MyTrait`. If we were to discard one impl in favor
1494 // of the other, we would be left with one candidate, causing
1495 // us to "successfully" select the predicate, unifying
1496 // _#0t with (for example) `u8`.
1498 // However, we have no reason to believe that this unification
1499 // is correct - we've essentially just picked an arbitrary
1500 // *possibility* for _#0t, and required that this be the *only*
1503 // Eventually, we will either:
1504 // 1) Unify all inference variables in the predicate through
1505 // some other means (e.g. type-checking of a function). We will
1506 // then be in a position to drop marker trait candidates
1507 // without constraining inference variables (since there are
1508 // none left to constrain)
1509 // 2) Be left with some unconstrained inference variables. We
1510 // will then correctly report an inference error, since the
1511 // existence of multiple marker trait impls tells us nothing
1512 // about which one should actually apply.
1519 ParamCandidate(ref cand) => {
1520 // Prefer the impl to a global where clause candidate.
1521 return is_global(cand);
1530 | GeneratorCandidate
1531 | FnPointerCandidate
1532 | BuiltinObjectCandidate
1533 | BuiltinUnsizeCandidate
1534 | BuiltinCandidate { has_nested: true } => {
1535 match victim.candidate {
1536 ParamCandidate(ref cand) => {
1537 // Prefer these to a global where-clause bound
1538 // (see issue #50825).
1539 is_global(cand) && other.evaluation.must_apply_modulo_regions()
/// Computes the built-in-impl conditions under which the obligation's
/// self type is `Sized`.
1548 fn sized_conditions(
1550 obligation: &TraitObligation<'tcx>,
1551 ) -> BuiltinImplConditions<'tcx> {
1552 use self::BuiltinImplConditions::{Ambiguous, None, Where};
1554 // NOTE: binder moved to (*)
1555 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
1557 match self_ty.kind {
1558 ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
1569 | ty::GeneratorWitness(..)
1574 // safe for everything
1575 Where(ty::Binder::dummy(Vec::new()))
// Unsized types: never `Sized`.
1578 ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
// Tuples: sizedness depends only on the last element.
1581 Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
1584 ty::Adt(def, substs) => {
1585 let sized_crit = def.sized_constraint(self.tcx());
1586 // (*) binder moved here
1587 Where(ty::Binder::bind(
1588 sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect(),
1592 ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
1593 ty::Infer(ty::TyVar(_)) => Ambiguous,
1597 | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1598 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
/// Computes the built-in-impl conditions under which the obligation's
/// self type is `Copy`/`Clone`.
1603 fn copy_clone_conditions(
1605 obligation: &TraitObligation<'tcx>,
1606 ) -> BuiltinImplConditions<'tcx> {
1607 // NOTE: binder moved to (*)
1608 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
1610 use self::BuiltinImplConditions::{Ambiguous, None, Where};
1612 match self_ty.kind {
1613 ty::Infer(ty::IntVar(_))
1614 | ty::Infer(ty::FloatVar(_))
1617 | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
1626 | ty::Ref(_, _, hir::Mutability::Not) => {
1627 // Implementations provided in libcore
// Unsized types and `&mut` references are neither `Copy` nor `Clone`
// via the builtin rules.
1635 | ty::GeneratorWitness(..)
1637 | ty::Ref(_, _, hir::Mutability::Mut) => None,
1639 ty::Array(element_ty, _) => {
1640 // (*) binder moved here
1641 Where(ty::Binder::bind(vec![element_ty]))
1645 // (*) binder moved here
1646 Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
1649 ty::Closure(_, substs) => {
1650 // (*) binder moved here
1651 Where(ty::Binder::bind(substs.as_closure().upvar_tys().collect()))
1654 ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
1655 // Fallback to whatever user-defined impls exist in this case.
1659 ty::Infer(ty::TyVar(_)) => {
1660 // Unbound type variable. Might or might not have
1661 // applicable impls and so forth, depending on what
1662 // those type variables wind up being bound to.
1668 | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1669 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
1674 /// For default impls, we need to break apart a type into its
1675 /// "constituent types" -- meaning, the types that it contains.
1677 /// Here are some (simple) examples:
1680 /// (i32, u32) -> [i32, u32]
1681 /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
1682 /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
1683 /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
1685 fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
// Leaf types have no constituents.
1695 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
1697 | ty::Char => Vec::new(),
1703 | ty::Projection(..)
1705 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1706 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
1709 ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
1713 ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],
1715 ty::Tuple(ref tys) => {
1716 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
1717 tys.iter().map(|k| k.expect_ty()).collect()
1720 ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().collect(),
1722 ty::Generator(_, ref substs, _) => {
1723 let witness = substs.as_generator().witness();
1724 substs.as_generator().upvar_tys().chain(iter::once(witness)).collect()
1727 ty::GeneratorWitness(types) => {
1728 // This is sound because no regions in the witness can refer to
1729 // the binder outside the witness. So we'll effectively reuse
1730 // the implicit binder around the witness.
1731 types.skip_binder().to_vec()
1734 // For `PhantomData<T>`, we pass `T`.
1735 ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),
1737 ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),
1739 ty::Opaque(def_id, substs) => {
1740 // We can resolve the `impl Trait` to its concrete type,
1741 // which enforces a DAG between the functions requiring
1742 // the auto trait bounds in question.
1743 vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
/// Builds one `T: Trait` obligation per type in `types`, normalizing each
/// type first and collecting the obligations produced by normalization.
1748 fn collect_predicates_for_types(
1750 param_env: ty::ParamEnv<'tcx>,
1751 cause: ObligationCause<'tcx>,
1752 recursion_depth: usize,
1753 trait_def_id: DefId,
1754 types: ty::Binder<Vec<Ty<'tcx>>>,
1755 ) -> Vec<PredicateObligation<'tcx>> {
1756 // Because the types were potentially derived from
1757 // higher-ranked obligations they may reference late-bound
1758 // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
1759 // yield a type like `for<'a> &'a i32`. In general, we
1760 // maintain the invariant that we never manipulate bound
1761 // regions, so we have to process these bound regions somehow.
1763 // The strategy is to:
1765 // 1. Instantiate those regions to placeholder regions (e.g.,
1766 // `for<'a> &'a i32` becomes `&0 i32`.
1767 // 2. Produce something like `&'0 i32 : Copy`
1768 // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
1771 .skip_binder() // binder moved -\
1774 let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/
1776 self.infcx.commit_unconditionally(|_| {
1777 let (placeholder_ty, _) = self.infcx.replace_bound_vars_with_placeholders(&ty);
1778 let Normalized { value: normalized_ty, mut obligations } =
// Guard against stack overflow on deeply nested types.
1779 ensure_sufficient_stack(|| {
1780 project::normalize_with_depth(
1788 let placeholder_obligation = predicate_for_trait_def(
1797 obligations.push(placeholder_obligation);
1804 ///////////////////////////////////////////////////////////////////////////
1807 // Matching is a common path used for both evaluation and
1808 // confirmation. It basically unifies types that appear in impls
1809 // and traits. This does affect the surrounding environment;
1810 // therefore, when used during evaluation, match routines must be
1811 // run inside of a `probe()` so that their side-effects are
1817 obligation: &TraitObligation<'tcx>,
1818 ) -> Normalized<'tcx, SubstsRef<'tcx>> {
// Re-run a previously successful `match_impl`; failure here indicates an
// internal inconsistency (the impl matched before but no longer does).
1819 match self.match_impl(impl_def_id, obligation) {
1820 Ok(substs) => substs,
1823 "Impl {:?} was matchable against {:?} but now is not",
1834 obligation: &TraitObligation<'tcx>,
1835 ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
1836 let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
1838 // Before we create the substitutions and everything, first
1839 // consider a "quick reject". This avoids creating more types
1840 // and so forth that we need to.
1841 if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
1845 let (placeholder_obligation, _) =
1846 self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate);
1847 let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
// Fresh inference variables stand in for the impl's generic parameters.
1849 let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
1851 let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
1853 let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
1854 ensure_sufficient_stack(|| {
1855 project::normalize_with_depth(
1857 obligation.param_env,
1858 obligation.cause.clone(),
1859 obligation.recursion_depth + 1,
1865 "match_impl(impl_def_id={:?}, obligation={:?}, \
1866 impl_trait_ref={:?}, placeholder_obligation_trait_ref={:?})",
1867 impl_def_id, obligation, impl_trait_ref, placeholder_obligation_trait_ref
// Unify the (placeholder) obligation trait ref with the impl's trait
// ref; side-effects constrain `impl_substs`.
1870 let InferOk { obligations, .. } = self
1872 .at(&obligation.cause, obligation.param_env)
1873 .eq(placeholder_obligation_trait_ref, impl_trait_ref)
1874 .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
1875 nested_obligations.extend(obligations);
1878 && self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
1880 debug!("match_impl: reservation impls only apply in intercrate mode");
1884 debug!("match_impl: success impl_substs={:?}", impl_substs);
1885 Ok(Normalized { value: impl_substs, obligations: nested_obligations })
/// Cheap pre-filter: returns `true` when some pair of simplified input
/// types proves the impl can never match the obligation.
1888 fn fast_reject_trait_refs(
1890 obligation: &TraitObligation<'_>,
1891 impl_trait_ref: &ty::TraitRef<'_>,
1893 // We can avoid creating type variables and doing the full
1894 // substitution if we find that any of the input types, when
1895 // simplified, do not match.
1897 obligation.predicate.skip_binder().trait_ref.substs.iter().zip(impl_trait_ref.substs).any(
1898 |(obligation_arg, impl_arg)| {
1899 match (obligation_arg.unpack(), impl_arg.unpack()) {
1900 (GenericArgKind::Type(obligation_ty), GenericArgKind::Type(impl_ty)) => {
1901 let simplified_obligation_ty =
1902 fast_reject::simplify_type(self.tcx(), obligation_ty, true);
1903 let simplified_impl_ty =
1904 fast_reject::simplify_type(self.tcx(), impl_ty, false);
// Reject only when *both* sides simplify and the results differ;
// an unsimplifiable side might still unify later.
1906 simplified_obligation_ty.is_some()
1907 && simplified_impl_ty.is_some()
1908 && simplified_obligation_ty != simplified_impl_ty
1910 (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => {
1911 // Lifetimes can never cause a rejection.
1914 (GenericArgKind::Const(_), GenericArgKind::Const(_)) => {
1915 // Conservatively ignore consts (i.e. assume they might
1916 // unify later) until we have `fast_reject` support for
1917 // them (if we'll ever need it, even).
// Mismatched generic-arg kinds would be a compiler bug.
1920 _ => unreachable!(),
1926 /// Normalize `where_clause_trait_ref` and try to match it against
1927 /// `obligation`. If successful, return any predicates that
1928 /// result from the normalization. Normalization is necessary
1929 /// because where-clauses are stored in the parameter environment
1931 fn match_where_clause_trait_ref(
1933 obligation: &TraitObligation<'tcx>,
1934 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1935 ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
// Currently just delegates; kept separate for the doc comment above.
1936 self.match_poly_trait_ref(obligation, where_clause_trait_ref)
1939 /// Returns `Ok` if `poly_trait_ref` being true implies that the
1940 /// obligation is satisfied.
1941 fn match_poly_trait_ref(
1943 obligation: &TraitObligation<'tcx>,
1944 poly_trait_ref: ty::PolyTraitRef<'tcx>,
1945 ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
1947 "match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
1948 obligation, poly_trait_ref
// A successful `sup` yields the nested obligations; failure maps to `Err(())`.
1952 .at(&obligation.cause, obligation.param_env)
1953 .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
1954 .map(|InferOk { obligations, .. }| obligations)
1958 ///////////////////////////////////////////////////////////////////////////
/// Structurally matches two freshened trait refs (used for cycle
/// detection); returns `true` when they are compatible.
1961 fn match_fresh_trait_refs(
1963 previous: ty::PolyTraitRef<'tcx>,
1964 current: ty::PolyTraitRef<'tcx>,
1965 param_env: ty::ParamEnv<'tcx>,
1967 let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
1968 matcher.relate(previous, current).is_ok()
1973 previous_stack: TraitObligationStackList<'o, 'tcx>,
1974 obligation: &'o TraitObligation<'tcx>,
1975 ) -> TraitObligationStack<'o, 'tcx> {
// Freshen the trait ref so structurally-equal obligations (modulo
// inference variables) compare equal on the stack.
1976 let fresh_trait_ref =
1977 obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);
1979 let dfn = previous_stack.cache.next_dfn();
1980 let depth = previous_stack.depth() + 1;
1981 TraitObligationStack {
1984 reached_depth: Cell::new(depth),
1985 previous: previous_stack,
/// Builds the (unnormalized) `Fn`-family trait ref implied by a closure's
/// signature for the given obligation.
1991 fn closure_trait_ref_unnormalized(
1993 obligation: &TraitObligation<'tcx>,
1994 substs: SubstsRef<'tcx>,
1995 ) -> ty::PolyTraitRef<'tcx> {
1996 debug!("closure_trait_ref_unnormalized(obligation={:?}, substs={:?})", obligation, substs);
1997 let closure_sig = substs.as_closure().sig();
1999 debug!("closure_trait_ref_unnormalized: closure_sig = {:?}", closure_sig);
2001 // (1) Feels icky to skip the binder here, but OTOH we know
2002 // that the self-type is an unboxed closure type and hence is
2003 // in fact unparameterized (or at least does not reference any
2004 // regions bound in the obligation). Still probably some
2005 // refactoring could make this nicer.
2006 closure_trait_ref_and_return_type(
2008 obligation.predicate.def_id(),
2009 obligation.predicate.skip_binder().self_ty(), // (1)
2011 util::TupleArgumentsFlag::No,
// Discard the return type; only the trait ref is needed here.
2013 .map_bound(|(trait_ref, _)| trait_ref)
/// Builds the (unnormalized) `Generator` trait ref implied by a
/// generator's signature for the given obligation.
2016 fn generator_trait_ref_unnormalized(
2018 obligation: &TraitObligation<'tcx>,
2019 substs: SubstsRef<'tcx>,
2020 ) -> ty::PolyTraitRef<'tcx> {
2021 let gen_sig = substs.as_generator().poly_sig();
2023 // (1) Feels icky to skip the binder here, but OTOH we know
2024 // that the self-type is a generator type and hence is
2025 // in fact unparameterized (or at least does not reference any
2026 // regions bound in the obligation). Still probably some
2027 // refactoring could make this nicer.
2029 super::util::generator_trait_ref_and_outputs(
2031 obligation.predicate.def_id(),
2032 obligation.predicate.skip_binder().self_ty(), // (1)
// Discard the yield/return outputs; only the trait ref is needed here.
2035 .map_bound(|(trait_ref, ..)| trait_ref)
2038 /// Returns the obligations that are implied by instantiating an
2039 /// impl or trait. The obligations are substituted and fully
2040 /// normalized. This is used when confirming an impl or default
2042 fn impl_or_trait_obligations(
2044 cause: ObligationCause<'tcx>,
2045 recursion_depth: usize,
2046 param_env: ty::ParamEnv<'tcx>,
2047 def_id: DefId, // of impl or trait
2048 substs: SubstsRef<'tcx>, // for impl or trait
2049 ) -> Vec<PredicateObligation<'tcx>> {
2050 debug!("impl_or_trait_obligations(def_id={:?})", def_id);
2051 let tcx = self.tcx();
2053 // To allow for one-pass evaluation of the nested obligation,
2054 // each predicate must be preceded by the obligations required
2056 // for example, if we have:
2057 // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
2058 // the impl will have the following predicates:
2059 // <V as Iterator>::Item = U,
2060 // U: Iterator, U: Sized,
2061 // V: Iterator, V: Sized,
2062 // <U as Iterator>::Item: Copy
2063 // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
2064 // obligation will normalize to `<$0 as Iterator>::Item = $1` and
2065 // `$1: Copy`, so we must ensure the obligations are emitted in
2067 let predicates = tcx.predicates_of(def_id);
// Impls/traits have no parent predicate list; anything else is a bug.
2068 assert_eq!(predicates.parent, None);
2069 let mut obligations = Vec::with_capacity(predicates.predicates.len());
2070 for (predicate, _) in predicates.predicates {
2071 let predicate = normalize_with_depth_to(
2076 &predicate.subst(tcx, substs),
2079 obligations.push(Obligation {
2080 cause: cause.clone(),
2087 // We are performing deduplication here to avoid exponential blowups
2088 // (#38528) from happening, but the real cause of the duplication is
2089 // unknown. What we know is that the deduplication avoids exponential
2090 // amount of predicates being propagated when processing deeply nested
2093 // This code is hot enough that it's worth avoiding the allocation
2094 // required for the FxHashSet when possible. Special-casing lengths 0,
2095 // 1 and 2 covers roughly 75-80% of the cases.
2096 if obligations.len() <= 1 {
2097 // No possibility of duplicates.
2098 } else if obligations.len() == 2 {
2099 // Only two elements. Drop the second if they are equal.
2100 if obligations[0] == obligations[1] {
2101 obligations.truncate(1);
2104 // Three or more elements. Use a general deduplication process.
2105 let mut seen = FxHashSet::default();
2106 obligations.retain(|i| seen.insert(i.clone()));
/// Extension trait for `TraitObligation` providing cause-derivation helpers.
2113 trait TraitObligationExt<'tcx> {
// `variant` selects which `ObligationCauseCode` constructor wraps the
// derived cause (e.g. builtin-derived vs. impl-derived).
2116 variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
2117 ) -> ObligationCause<'tcx>;
2120 impl<'tcx> TraitObligationExt<'tcx> for TraitObligation<'tcx> {
2121 #[allow(unused_comparisons)]
2124 variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
2125 ) -> ObligationCause<'tcx> {
2127 * Creates a cause for obligations that are derived from
2128 * `obligation` by a recursive search (e.g., for a builtin
2129 * bound, or eventually an `auto trait Foo`). If `obligation`
2130 * is itself a derived obligation, this is just a clone, but
2131 * otherwise we create a "derived obligation" cause so as to
2132 * keep track of the original root obligation for error
2136 let obligation = self;
2138 // NOTE(flaper87): As of now, it keeps track of the whole error
2139 // chain. Ideally, we should have a way to configure this either
2140 // by using -Z verbose or just a CLI argument.
2141 let derived_cause = DerivedObligationCause {
2142 parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
// Clone the parent's cause code so the full derivation chain is retained.
2143 parent_code: Rc::new(obligation.cause.code.clone()),
2145 let derived_code = variant(derived_cause);
// Same span/body as the parent obligation; only the code differs.
2146 ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
2150 impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
// Wraps this stack entry as the head of a `TraitObligationStackList`.
2151 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2152 TraitObligationStackList::with(self)
2155 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2159 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2163 /// Indicates that attempting to evaluate this stack entry
2164 /// required accessing something from the stack at depth `reached_depth`.
2165 fn update_reached_depth(&self, reached_depth: usize) {
2167 self.depth > reached_depth,
2168 "invoked `update_reached_depth` with something under this stack: \
2169 self.depth={} reached_depth={}",
2173 debug!("update_reached_depth(reached_depth={})", reached_depth);
// Walk down the stack, marking every entry above `reached_depth` as a
// cycle participant by lowering its recorded `reached_depth` (min keeps
// the deepest dependency seen so far).
2175 while reached_depth < p.depth {
2176 debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref);
2177 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
// NOTE(review): `unwrap` here presumably relies on the assert above
// guaranteeing an ancestor at `reached_depth` exists -- confirm.
2178 p = p.previous.head.unwrap();
2183 /// The "provisional evaluation cache" is used to store intermediate cache results
2184 /// when solving auto traits. Auto traits are unusual in that they can support
2185 /// cycles. So, for example, a "proof tree" like this would be ok:
2187 /// - `Foo<T>: Send` :-
2188 /// - `Bar<T>: Send` :-
2189 /// - `Foo<T>: Send` -- cycle, but ok
2190 /// - `Baz<T>: Send`
2192 /// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2193 /// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2194 /// For non-auto traits, this cycle would be an error, but for auto traits (because
2195 /// they are coinductive) it is considered ok.
2197 /// However, there is a complication: at the point where we have
2198 /// "proven" `Bar<T>: Send`, we have in fact only proven it
2199 /// *provisionally*. In particular, we proved that `Bar<T>: Send`
2200 /// *under the assumption* that `Foo<T>: Send`. But what if we later
2201 /// find out this assumption is wrong? Specifically, we could
2202 /// encounter some kind of error proving `Baz<T>: Send`. In that case,
2203 /// `Bar<T>: Send` didn't turn out to be true.
2205 /// In Issue #60010, we found a bug in rustc where it would cache
2206 /// these intermediate results. This was fixed in #60444 by disabling
2207 /// *all* caching for things involved in a cycle -- in our example,
2208 /// that would mean we don't cache that `Bar<T>: Send`. But this led
2209 /// to large slowdowns.
2211 /// Specifically, imagine this scenario, where proving `Baz<T>: Send`
2212 /// first requires proving `Bar<T>: Send` (which is true:
2214 /// - `Foo<T>: Send` :-
2215 /// - `Bar<T>: Send` :-
2216 /// - `Foo<T>: Send` -- cycle, but ok
2217 /// - `Baz<T>: Send`
2218 /// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2219 /// - `*const T: Send` -- but what if we later encounter an error?
2221 /// The *provisional evaluation cache* resolves this issue. It stores
2222 /// cache results that we've proven but which were involved in a cycle
2223 /// in some way. We track the minimal stack depth (i.e., the
2224 /// farthest from the top of the stack) that we are dependent on.
2225 /// The idea is that the cache results within are all valid -- so long as
2226 /// none of the nodes in between the current node and the node at that minimum
2227 /// depth result in an error (in which case the cached results are just thrown away).
2229 /// During evaluation, we consult this provisional cache and rely on
2230 /// it. Accessing a cached value is considered equivalent to accessing
2231 /// a result at `reached_depth`, so it marks the *current* solution as
2232 /// provisional as well. If an error is encountered, we toss out any
2233 /// provisional results added from the subtree that encountered the
2234 /// error. When we pop the node at `reached_depth` from the stack, we
2235 /// can commit all the things that remain in the provisional cache.
2236 struct ProvisionalEvaluationCache<'tcx> {
2237 /// next "depth first number" to issue -- just a counter
2240 /// Stores the "coldest" depth (bottom of stack) reached by any of
2241 /// the evaluation entries. The idea here is that all things in the provisional
2242 /// cache are always dependent on *something* that is colder in the stack:
2243 /// therefore, if we add a new entry that is dependent on something *colder still*,
2244 /// we have to modify the depth for all entries at once.
2248 /// Imagine we have a stack `A B C D E` (with `E` being the top of
2249 /// the stack). We cache something with depth 2, which means that
2250 /// it was dependent on C. Then we pop E but go on and process a
2251 /// new node F: A B C D F. Now F adds something to the cache with
2252 /// depth 1, meaning it is dependent on B. Our original cache
2253 /// entry is also dependent on B, because there is a path from E
2254 /// to C and then from C to F and from F to B.
2255 reached_depth: Cell<usize>,
2257 /// Map from cache key to the provisionally evaluated thing.
2258 /// The cache entries contain the result but also the DFN in which they
2259 /// were added. The DFN is used to clear out values on failure.
2261 /// Imagine we have a stack like:
2263 /// - `A B C` and we add a cache for the result of C (DFN 2)
2264 /// - Then we have a stack `A B D` where `D` has DFN 3
2265 /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
2266 /// - `E` generates various cache entries which have cyclic dependencies on `B`
2267 /// - `A B D E F` and so forth
2268 /// - the DFN of `F` for example would be 5
2269 /// - then we determine that `E` is in error -- we will then clear
2270 /// all cache values whose DFN is >= 4 -- in this case, that
2271 /// means the cached value for `F`.
2272 map: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, ProvisionalEvaluation>>,
2275 /// A cache value for the provisional cache: contains the depth-first
2276 /// number (DFN) and result.
2277 #[derive(Copy, Clone, Debug)]
2278 struct ProvisionalEvaluation {
// The provisionally-computed evaluation result for this trait-ref.
2280 result: EvaluationResult,
2283 impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
2284 fn default() -> Self {
// Start with DFN 0, an empty map, and `usize::MAX` for `reached_depth`,
// meaning "no depth reached yet" (any real depth will `min` below it).
2285 Self { dfn: Cell::new(0), reached_depth: Cell::new(usize::MAX), map: Default::default() }
2289 impl<'tcx> ProvisionalEvaluationCache<'tcx> {
2290 /// Get the next DFN in sequence (basically a counter).
2291 fn next_dfn(&self) -> usize {
// Post-increment: return the value read *before* bumping the counter.
2292 let result = self.dfn.get();
2293 self.dfn.set(result + 1);
2297 /// Check the provisional cache for any result for
2298 /// `fresh_trait_ref`. If there is a hit, then you must consider
2299 /// it an access to the stack slots at depth
2300 /// `self.current_reached_depth()` and above.
2301 fn get_provisional(&self, fresh_trait_ref: ty::PolyTraitRef<'tcx>) -> Option<EvaluationResult> {
2303 "get_provisional(fresh_trait_ref={:?}) = {:#?} with reached-depth {}",
// The map is borrowed once for the debug output and again below for the
// actual lookup; both are short-lived shared borrows.
2305 self.map.borrow().get(&fresh_trait_ref),
2306 self.reached_depth.get(),
2308 Some(self.map.borrow().get(&fresh_trait_ref)?.result)
2311 /// Current value of the `reached_depth` counter -- all the
2312 /// provisional cache entries are dependent on the item at this
2314 fn current_reached_depth(&self) -> usize {
2315 self.reached_depth.get()
2318 /// Insert a provisional result into the cache. The result came
2319 /// from the node with the given DFN. It accessed a minimum depth
2320 /// of `reached_depth` to compute. It evaluated `fresh_trait_ref`
2321 /// and resulted in `result`.
2322 fn insert_provisional(
2325 reached_depth: usize,
2326 fresh_trait_ref: ty::PolyTraitRef<'tcx>,
2327 result: EvaluationResult,
2330 "insert_provisional(from_dfn={}, reached_depth={}, fresh_trait_ref={:?}, result={:?})",
2331 from_dfn, reached_depth, fresh_trait_ref, result,
// The cache-wide `reached_depth` is the minimum over all entries: if this
// entry depends on something colder (smaller depth), lower it for everyone.
2333 let r_d = self.reached_depth.get();
2334 self.reached_depth.set(r_d.min(reached_depth));
2336 debug!("insert_provisional: reached_depth={:?}", self.reached_depth.get());
2338 self.map.borrow_mut().insert(fresh_trait_ref, ProvisionalEvaluation { from_dfn, result });
2341 /// Invoked when the node with dfn `dfn` does not get a successful
2342 /// result. This will clear out any provisional cache entries
2343 /// that were added since `dfn` was created. This is because the
2344 /// provisional entries are things which must assume that the
2345 /// things on the stack at the time of their creation succeeded --
2346 /// since the failing node is presently at the top of the stack,
2347 /// these provisional entries must either depend on it or some
2349 fn on_failure(&self, dfn: usize) {
2350 debug!("on_failure(dfn={:?})", dfn,);
2351 self.map.borrow_mut().retain(|key, eval| {
2352 if !eval.from_dfn >= dfn {
2353 debug!("on_failure: removing {:?}", key);
2361 /// Invoked when the node at depth `depth` completed without
2362 /// depending on anything higher in the stack (if that completion
2363 /// was a failure, then `on_failure` should have been invoked
2364 /// already). The callback `op` will be invoked for each
2365 /// provisional entry that we can now confirm.
2369 mut op: impl FnMut(ty::PolyTraitRef<'tcx>, EvaluationResult),
2371 debug!("on_completion(depth={}, reached_depth={})", depth, self.reached_depth.get(),);
// If some entry still depends on a node colder than `depth`, nothing can
// be committed yet -- bail out and keep the provisional entries.
2373 if self.reached_depth.get() < depth {
2374 debug!("on_completion: did not yet reach depth to complete");
// Otherwise, commit: drain every provisional entry through `op`.
2378 for (fresh_trait_ref, eval) in self.map.borrow_mut().drain() {
2379 debug!("on_completion: fresh_trait_ref={:?} eval={:?}", fresh_trait_ref, eval,);
2381 op(fresh_trait_ref, eval.result);
// Reset to "no depth reached" for the next evaluation tree.
2384 self.reached_depth.set(usize::MAX);
/// A (possibly empty) linked-list view of the trait-obligation stack,
/// paired with the provisional evaluation cache it belongs to.
2388 #[derive(Copy, Clone)]
2389 struct TraitObligationStackList<'o, 'tcx> {
2390 cache: &'o ProvisionalEvaluationCache<'tcx>,
// `None` means the list is empty (bottom of the stack).
2391 head: Option<&'o TraitObligationStack<'o, 'tcx>>,
2394 impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
// An empty list still needs access to the shared provisional cache.
2395 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2396 TraitObligationStackList { cache, head: None }
// Builds a list headed at `r`, borrowing the cache from `r` itself.
2399 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2400 TraitObligationStackList { cache: r.cache(), head: Some(r) }
2403 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
// Depth of the head entry; an empty list has depth 0.
2407 fn depth(&self) -> usize {
2408 if let Some(head) = self.head { head.depth } else { 0 }
// Iterates from the head of the stack toward the bottom, yielding each
// `TraitObligationStack` entry in turn.
2412 impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
2413 type Item = &'o TraitObligationStack<'o, 'tcx>;
2415 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
2426 impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
2427 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2428 write!(f, "TraitObligationStack({:?})", self.obligation)