Ok(Some(candidate))
}
- fn pick_candidate_cache(&self,
- cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
- -> &SelectionCache<'tcx>
- {
- // High-level idea: we have to decide whether to consult the
- // cache that is specific to this scope, or to consult the
- // global cache. We want the cache that is specific to this
- // scope whenever where clauses might affect the result.
+ fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> {
+ // If there are any where-clauses in scope, then we always use
+ // a cache local to this particular scope. Otherwise, we
+ // switch to a global cache. We used to try and draw
+ // finer-grained distinctions, but that led to a series of
+ // annoying and weird bugs like #22019 and #18290. This simple
+ // rule seems to be pretty clearly safe and also still retains
+ // a very high hit rate (~95% when compiling rustc).
+ if !self.param_env().caller_bounds.is_empty() {
+ return &self.param_env().selection_cache;
+ }
// Avoid using the master cache during coherence and just rely
// on the local cache. This effectively disables caching
return &self.param_env().selection_cache;
}
- // If the trait refers to any parameters in scope, then use
- // the cache of the param-environment.
- if
- cache_fresh_trait_pred.0.input_types().iter().any(
- |&t| ty::type_has_self(t) || ty::type_has_params(t))
- {
- return &self.param_env().selection_cache;
- }
-
- // If the trait refers to unbound type variables, and there
- // are where clauses in scope, then use the local environment.
- // If there are no where clauses in scope, which is a very
- // common case, then we can use the global environment.
- // See the discussion in doc.rs for more details.
- if
- !self.param_env().caller_bounds.is_empty() &&
- cache_fresh_trait_pred.0.input_types().iter().any(
- |&t| ty::type_has_ty_infer(t))
- {
- return &self.param_env().selection_cache;
- }
-
// Otherwise, we can use the global cache.
&self.tcx().selection_cache
}
cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
-> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>>
{
- let cache = self.pick_candidate_cache(cache_fresh_trait_pred);
+ let cache = self.pick_candidate_cache();
let hashmap = cache.hashmap.borrow();
hashmap.get(&cache_fresh_trait_pred.0.trait_ref).map(|c| (*c).clone())
}
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>)
{
- let cache = self.pick_candidate_cache(&cache_fresh_trait_pred);
+ let cache = self.pick_candidate_cache();
let mut hashmap = cache.hashmap.borrow_mut();
hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate);
}
-> bool
{
// In general, it's a good idea to cache results, even
- // ambigious ones, to save us some trouble later. But we have
+ // ambiguous ones, to save us some trouble later. But we have
// to be careful not to cache results that could be
// invalidated later by advances in inference. Normally, this
// is not an issue, because any inference variables whose
_ => {
self.tcx().sess.span_bug(
obligation.cause.span,
- format!("match_projection_obligation_against_bounds_from_trait() called \
- but self-ty not a projection: {}",
- skol_trait_predicate.trait_ref.self_ty().repr(self.tcx())).as_slice());
+ &format!("match_projection_obligation_against_bounds_from_trait() called \
+ but self-ty not a projection: {}",
+ skol_trait_predicate.trait_ref.self_ty().repr(self.tcx())));
}
};
debug!("match_projection_obligation_against_bounds_from_trait: \
///
/// - The impl is conditional, in which case we may not have winnowed it out
/// because we don't know if the conditions apply, but the where clause is basically
- /// telling us taht there is some impl, though not necessarily the one we see.
+ /// telling us that there is some impl, though not necessarily the one we see.
///
/// In both cases we prefer to take the where clause, which is
/// essentially harmless. See issue #18453 for more details of
// the where clauses are in scope.
true
}
- (&ParamCandidate(ref bound1), &ParamCandidate(ref bound2)) => {
- self.infcx.probe(|_| {
- let bound1 =
- project::normalize_with_depth(self,
- stack.obligation.cause.clone(),
- stack.obligation.recursion_depth+1,
- bound1);
- let bound2 =
- project::normalize_with_depth(self,
- stack.obligation.cause.clone(),
- stack.obligation.recursion_depth+1,
- bound2);
- let origin =
- infer::RelateOutputImplTypes(stack.obligation.cause.span);
- self.infcx
- .sub_poly_trait_refs(false, origin, bound1.value, bound2.value)
- .is_ok()
- })
- }
_ => {
false
}
return Ok(ParameterBuiltin);
}
+ // Upvars are always local variables or references to
+ // local variables, and local variables cannot be
+ // unsized, so the closure struct as a whole must be
+ // Sized.
+ if bound == ty::BoundSized {
+ return Ok(If(Vec::new()));
+ }
+
match self.closure_typer.closure_upvars(def_id, substs) {
Some(upvars) => {
Ok(If(upvars.iter().map(|c| c.ty).collect()))
Ok(obligations) => obligations,
Err(()) => {
self.tcx().sess.bug(
- format!("Where clause `{}` was applicable to `{}` but now is not",
- param.repr(self.tcx()),
- obligation.repr(self.tcx())).as_slice());
+ &format!("Where clause `{}` was applicable to `{}` but now is not",
+ param.repr(self.tcx()),
+ obligation.repr(self.tcx())));
}
}
}
Some(r) => r,
None => {
self.tcx().sess.span_bug(obligation.cause.span,
- format!("unable to upcast from {} to {}",
- poly_trait_ref.repr(self.tcx()),
- obligation_def_id.repr(self.tcx())).as_slice());
+ &format!("unable to upcast from {} to {}",
+ poly_trait_ref.repr(self.tcx()),
+ obligation_def_id.repr(self.tcx())));
}
};