1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
4 use crate::hir::def::DefKind;
5 use crate::hir::def_id::DefId;
6 use crate::hir::map::DefPathData;
7 use crate::ich::NodeIdHashingMode;
8 use crate::middle::lang_items;
9 use crate::mir::interpret::{sign_extend, truncate};
10 use crate::traits::{self, ObligationCause};
11 use crate::ty::layout::{Integer, IntegerExt};
12 use crate::ty::query::TyCtxtAt;
13 use crate::ty::subst::{GenericArgKind, InternalSubsts, Subst, SubstsRef};
14 use crate::ty::TyKind::*;
15 use crate::ty::{self, DefIdTree, GenericParamDefKind, Ty, TyCtxt, TypeFoldable};
16 use crate::util::common::ErrorReported;
18 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
19 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
20 use rustc_macros::HashStable;
23 use syntax::attr::{self, SignedInt, UnsignedInt};
24 use syntax_pos::{Span, DUMMY_SP};
// A variant discriminant value paired with the integer type it is interpreted at.
// The bit pattern in `val` is only meaningful together with `ty` (width + signedness).
26 #[derive(Copy, Clone, Debug)]
27 pub struct Discr<'tcx> {
28 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
// Pretty-prints the discriminant as a signed or unsigned integer, depending on `self.ty`.
33 impl<'tcx> fmt::Display for Discr<'tcx> {
34 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
// Signed case: look up the type's bit width via the thread-local `TyCtxt`,
// then sign-extend the raw bits to `i128` so negative values print with a minus sign.
37 let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
39 // sign extend the raw representation to be an i128
40 let x = sign_extend(x, size) as i128;
// All other (unsigned) cases: print the raw `u128` bits directly.
43 _ => write!(fmt, "{}", self.val),
48 impl<'tcx> Discr<'tcx> {
49 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
50 pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
51 self.checked_add(tcx, 1).0
// Adds `n` to the discriminant, returning the new value and a flag that is
// `true` if the addition wrapped around the type's numeric range.
53 pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
// Determine the concrete integer width and signedness from the discriminant type.
54 let (int, signed) = match self.ty.kind {
55 Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
56 Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
57 _ => bug!("non integer discriminant"),
60 let size = int.size();
61 let bit_size = int.size().bits();
// How far a value of this type falls short of filling all 128 bits.
62 let shift = 128 - bit_size;
// Signed path: do the arithmetic in i128 after sign-extending from `bit_size` bits.
64 let sext = |u| sign_extend(u, size) as i128;
// Minimum of the signed type (e.g. -128 for i8): sign-extend the sign-bit pattern.
65 let min = sext(1_u128 << (bit_size - 1));
// Maximum of the signed type, obtained by shifting `i128::MAX` down to `bit_size` bits.
66 let max = i128::max_value() >> shift;
67 let val = sext(self.val);
68 assert!(n < (i128::max_value() as u128));
// On overflow, wrap around past `max` back to `min`.
70 let oflo = val > max - n;
71 let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
72 // zero the upper bits
73 let val = val as u128;
74 let val = truncate(val, size);
75 (Self { val: val as u128, ty: self.ty }, oflo)
// Unsigned path: same wrap-around logic but anchored at zero.
77 let max = u128::max_value() >> shift;
79 let oflo = val > max - n;
80 let val = if oflo { n - (max - val) - 1 } else { val + n };
81 (Self { val: val, ty: self.ty }, oflo)
// Extension trait bridging `attr::IntType` (the `#[repr(...)]` attribute form of an
// integer type) to the type-system representation used for enum discriminants.
86 pub trait IntTypeExt {
// Maps this attribute integer type to the corresponding interned `Ty`.
87 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Returns the successor of `val`, or `None` if incrementing would overflow the type.
88 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
// Returns the discriminant assigned to the first variant when none is written: zero.
89 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
92 impl IntTypeExt for attr::IntType {
// Exhaustive mapping from each AST-level integer type to its interned `Ty`.
93 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
95 SignedInt(ast::IntTy::I8) => tcx.types.i8,
96 SignedInt(ast::IntTy::I16) => tcx.types.i16,
97 SignedInt(ast::IntTy::I32) => tcx.types.i32,
98 SignedInt(ast::IntTy::I64) => tcx.types.i64,
99 SignedInt(ast::IntTy::I128) => tcx.types.i128,
100 SignedInt(ast::IntTy::Isize) => tcx.types.isize,
101 UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
102 UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
103 UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
104 UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
105 UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
106 UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
// The default discriminant for the first variant is zero.
110 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
111 Discr { val: 0, ty: self.to_ty(tcx) }
// Returns `val + 1`, or `None` on overflow; `None` input yields the initial discriminant.
114 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
115 if let Some(val) = val {
// Sanity check: the previous discriminant must already have this repr type.
116 assert_eq!(self.to_ty(tcx), val.ty);
117 let (new, oflo) = val.checked_add(tcx, 1);
118 if oflo { None } else { Some(new) }
120 Some(self.initial_discriminant(tcx))
// Reasons a manual `impl Copy` for a type can be rejected.
// NOTE(review): `InfrigingFields` is a typo for `InfringingFields`; renaming the
// variant would break external matchers, so it is only flagged here, not fixed.
126 pub enum CopyImplementationError<'tcx> {
// Fields whose types are not themselves `Copy`.
127 InfrigingFields(Vec<&'tcx ty::FieldDef>),
132 /// Describes whether a type is representable. For types that are not
133 /// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
134 /// distinguish between types that are recursive with themselves and types that
135 /// contain a different recursive type. These cases can therefore be treated
136 /// differently when reporting errors.
138 /// The ordering of the cases is significant. They are sorted so that cmp::max
139 /// will keep the "more erroneous" of two values.
// `Ord` is derived deliberately: variant declaration order encodes severity (see above).
140 #[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
141 pub enum Representability {
// Carries the spans of the directly self-recursive fields, for diagnostics.
144 SelfRecursive(Vec<Span>),
147 impl<'tcx> ty::ParamEnv<'tcx> {
// Checks whether a user-written `impl Copy` is legal for `self_type` under this
// `ParamEnv`: the type must be an ADT, every field must be `Copy`, and the ADT
// must not have a `Drop` impl.
148 pub fn can_type_implement_copy(
152 ) -> Result<(), CopyImplementationError<'tcx>> {
153 // FIXME: (@jroesch) float this code up
// An inference context is needed to normalize field types and check `Copy` bounds.
154 tcx.infer_ctxt().enter(|infcx| {
155 let (adt, substs) = match self_type.kind {
156 // These types used to have a builtin impl.
157 // Now libcore provides that impl.
165 | ty::Ref(_, _, hir::Mutability::Not) => return Ok(()),
167 ty::Adt(adt, substs) => (adt, substs),
169 _ => return Err(CopyImplementationError::NotAnAdt),
// Collect every field whose type fails the `Copy` check, across all variants.
172 let mut infringing = Vec::new();
173 for variant in &adt.variants {
174 for field in &variant.fields {
175 let ty = field.ty(tcx, substs);
// Skip fields whose types already contain errors; they were reported elsewhere.
176 if ty.references_error() {
179 let span = tcx.def_span(field.did);
180 let cause = ObligationCause { span, ..ObligationCause::dummy() };
181 let ctx = traits::FulfillmentContext::new();
// Normalize projections in the field type before testing the `Copy` bound.
182 match traits::fully_normalize(&infcx, ctx, cause, self, &ty) {
184 if !infcx.type_is_copy_modulo_regions(self, ty, span) {
185 infringing.push(field);
189 infcx.report_fulfillment_errors(&errors, None, false);
194 if !infringing.is_empty() {
195 return Err(CopyImplementationError::InfrigingFields(infringing));
// A type with a destructor can never be `Copy`.
197 if adt.has_dtor(tcx) {
198 return Err(CopyImplementationError::HasDestructor);
206 impl<'tcx> TyCtxt<'tcx> {
207 /// Creates a hash of the type `Ty` which will be the same no matter what crate
208 /// context it's calculated within. This is used by the `type_id` intrinsic.
209 pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
210 let mut hasher = StableHasher::new();
211 let mut hcx = self.create_stable_hashing_context();
213 // We want the type_id be independent of the types free regions, so we
214 // erase them. The erase_regions() call will also anonymize bound
215 // regions, which is desirable too.
216 let ty = self.erase_regions(&ty);
// Hash ignoring spans and using def-paths for node ids, so the result is
// stable across crates and compilation sessions.
218 hcx.while_hashing_spans(false, |hcx| {
219 hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
220 ty.hash_stable(hcx, &mut hasher);
227 impl<'tcx> TyCtxt<'tcx> {
// Returns `true` if `ty` is an ADT with at least one field whose type is `ty::Error`.
228 pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
229 if let ty::Adt(def, substs) = ty.kind {
// Walk the fields of every variant, instantiated with `substs`.
230 for field in def.all_fields() {
231 let field_ty = field.ty(self, substs);
232 if let Error = field_ty.kind {
240 /// Attempts to returns the deeply last field of nested structures, but
241 /// does not apply any normalization in its search. Returns the same type
242 /// if input `ty` is not a structure at all.
243 pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
// Identity closure = no normalization of projections/opaque types.
245 tcx.struct_tail_with_normalize(ty, |ty| ty)
248 /// Returns the deeply last field of nested structures, or the same type if
249 /// not a structure at all. Corresponds to the only possible unsized field,
250 /// and its type can be used to determine unsizing strategy.
252 /// Should only be called if `ty` has no inference variables and does not
253 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
254 /// normalization attempt may cause compiler bugs.
255 pub fn struct_tail_erasing_lifetimes(
258 param_env: ty::ParamEnv<'tcx>,
// Delegates to the generic version with full region-erasing normalization.
261 tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
264 /// Returns the deeply last field of nested structures, or the same type if
265 /// not a structure at all. Corresponds to the only possible unsized field,
266 /// and its type can be used to determine unsizing strategy.
268 /// This is parameterized over the normalization strategy (i.e. how to
269 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
270 /// function to indicate no normalization should take place.
272 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
274 pub fn struct_tail_with_normalize(
277 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
// Loop: repeatedly descend into the last field of structs / last element of tuples.
281 ty::Adt(def, substs) => {
// Enums and unions have no unique tail; stop here.
282 if !def.is_struct() {
285 match def.non_enum_variant().fields.last() {
286 Some(f) => ty = f.ty(self, substs),
// For tuples, the tail is the last element.
292 if let Some((&last_ty, _)) = tys.split_last() {
293 ty = last_ty.expect_ty();
// Projections and opaque types may hide a struct: normalize and retry,
// stopping if normalization makes no progress.
299 ty::Projection(_) | ty::Opaque(..) => {
300 let normalized = normalize(ty);
301 if ty == normalized {
316 /// Same as applying `struct_tail` on `source` and `target`, but only
317 /// keeps going as long as the two types are instances of the same
318 /// structure definitions.
319 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
320 /// whereas struct_tail produces `T`, and `Trait`, respectively.
322 /// Should only be called if the types have no inference variables and do
323 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
324 /// normalization attempt may cause compiler bugs.
325 pub fn struct_lockstep_tails_erasing_lifetimes(
329 param_env: ty::ParamEnv<'tcx>,
330 ) -> (Ty<'tcx>, Ty<'tcx>) {
// Delegates to the generic version with region-erasing normalization.
332 tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
333 tcx.normalize_erasing_regions(param_env, ty)
337 /// Same as applying `struct_tail` on `source` and `target`, but only
338 /// keeps going as long as the two types are instances of the same
339 /// structure definitions.
340 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
341 /// whereas struct_tail produces `T`, and `Trait`, respectively.
343 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
345 pub fn struct_lockstep_tails_with_normalize(
349 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
350 ) -> (Ty<'tcx>, Ty<'tcx>) {
351 let (mut a, mut b) = (source, target);
// Descend in lockstep while both sides share the same struct/tuple shape.
353 match (&a.kind, &b.kind) {
354 (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
355 if a_def == b_def && a_def.is_struct() =>
// Same struct on both sides: step into the last field of each.
357 if let Some(f) = a_def.non_enum_variant().fields.last() {
358 a = f.ty(self, a_substs);
359 b = f.ty(self, b_substs);
// Tuples of equal arity: step into the last element of each.
364 (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
365 if let Some(a_last) = a_tys.last() {
366 a = a_last.expect_ty();
367 b = b_tys.last().unwrap().expect_ty();
372 (ty::Projection(_), _)
373 | (ty::Opaque(..), _)
374 | (_, ty::Projection(_))
375 | (_, ty::Opaque(..)) => {
376 // If either side is a projection, attempt to
377 // progress via normalization. (Should be safe to
378 // apply to both sides as normalization is
380 let a_norm = normalize(a);
381 let b_norm = normalize(b);
// Stop if normalization made no progress on either side.
382 if a == a_norm && b == b_norm {
396 /// Given a set of predicates that apply to an object type, returns
397 /// the region bounds that the (erased) `Self` type must
398 /// outlive. Precisely *because* the `Self` type is erased, the
399 /// parameter `erased_self_ty` must be supplied to indicate what type
400 /// has been used to represent `Self` in the predicates
401 /// themselves. This should really be a unique type; `FreshTy(0)` is a
404 /// N.B., in some cases, particularly around higher-ranked bounds,
405 /// this function returns a kind of conservative approximation.
406 /// That is, all regions returned by this function are definitely
407 /// required, but there may be other region bounds that are not
408 /// returned, as well as requirements like `for<'a> T: 'a`.
410 /// Requires that trait definitions have been processed so that we can
411 /// elaborate predicates and walk supertraits.
413 // FIXME: callers may only have a `&[Predicate]`, not a `Vec`, so that's
414 // what this code should accept.
415 pub fn required_region_bounds(
417 erased_self_ty: Ty<'tcx>,
418 predicates: Vec<ty::Predicate<'tcx>>,
419 ) -> Vec<ty::Region<'tcx>> {
421 "required_region_bounds(erased_self_ty={:?}, predicates={:?})",
422 erased_self_ty, predicates
425 assert!(!erased_self_ty.has_escaping_bound_vars());
// Elaborate to include supertrait-implied predicates, then keep only
// `TypeOutlives` bounds whose subject is exactly `erased_self_ty`.
427 traits::elaborate_predicates(self, predicates)
428 .filter_map(|predicate| {
// All non-outlives predicate kinds contribute no region bound.
430 ty::Predicate::Projection(..)
431 | ty::Predicate::Trait(..)
432 | ty::Predicate::Subtype(..)
433 | ty::Predicate::WellFormed(..)
434 | ty::Predicate::ObjectSafe(..)
435 | ty::Predicate::ClosureKind(..)
436 | ty::Predicate::RegionOutlives(..)
437 | ty::Predicate::ConstEvaluatable(..) => None,
438 ty::Predicate::TypeOutlives(predicate) => {
439 // Search for a bound of the form `erased_self_ty
440 // : 'a`, but be wary of something like `for<'a>
441 // erased_self_ty : 'a` (we interpret a
442 // higher-ranked bound like that as 'static,
443 // though at present the code in `fulfill.rs`
444 // considers such bounds to be unsatisfiable, so
445 // it's kind of a moot point since you could never
446 // construct such an object, but this seems
447 // correct even if that code changes).
448 let ty::OutlivesPredicate(ref t, ref r) = predicate.skip_binder();
// Accept the bound only if the region has no escaping bound vars.
449 if t == &erased_self_ty && !r.has_escaping_bound_vars() {
460 /// Calculate the destructor of a given type.
// `validate` lets the caller veto a candidate `Drop` impl (e.g. for dropck checks);
// returns `None` when no valid `Drop` impl applies to `adt_did`'s type.
461 pub fn calculate_dtor(
464 validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported>,
465 ) -> Option<ty::Destructor> {
// Bail out early if the `Drop` lang item is unavailable.
466 let drop_trait = if let Some(def_id) = self.lang_items().drop_trait() {
// Force coherence checking for `Drop` before inspecting its impls.
472 self.ensure().coherent_trait(drop_trait);
474 let mut dtor_did = None;
475 let ty = self.type_of(adt_did);
// Scan `Drop` impls relevant to this type; the last validated one wins.
476 self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
477 if let Some(item) = self.associated_items(impl_did).next() {
478 if validate(self, impl_did).is_ok() {
479 dtor_did = Some(item.def_id);
// `?` turns "no impl found" into `None`.
484 Some(ty::Destructor { did: dtor_did? })
487 /// Returns the set of types that are required to be alive in
488 /// order to run the destructor of `def` (see RFCs 769 and
491 /// Note that this returns only the constraints for the
492 /// destructor of `def` itself. For the destructors of the
493 /// contents, you need `adt_dtorck_constraint`.
494 pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
// No destructor means no liveness constraints at all.
495 let dtor = match def.destructor(self) {
497 debug!("destructor_constraints({:?}) - no dtor", def.did);
500 Some(dtor) => dtor.did,
503 let impl_def_id = self.associated_item(dtor).container.id();
504 let impl_generics = self.generics_of(impl_def_id);
506 // We have a destructor - all the parameters that are not
507 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
510 // We need to return the list of parameters from the ADTs
511 // generics/substs that correspond to impure parameters on the
512 // impl's generics. This is a bit ugly, but conceptually simple:
514 // Suppose our ADT looks like the following
516 // struct S<X, Y, Z>(X, Y, Z);
520 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
522 // We want to return the parameters (X, Y). For that, we match
523 // up the item-substs <X, Y, Z> with the substs on the impl ADT,
524 // <P1, P2, P0>, and then look up which of the impl substs refer to
525 // parameters marked as pure.
527 let impl_substs = match self.type_of(impl_def_id).kind {
528 ty::Adt(def_, substs) if def_ == def => substs,
532 let item_substs = match self.type_of(def.did).kind {
533 ty::Adt(def_, substs) if def_ == def => substs,
// Pair each ADT generic arg with the corresponding impl arg, and keep those
// whose impl-side parameter is NOT marked `#[may_dangle]` (i.e. not pure_wrt_drop).
537 let result = item_substs
539 .zip(impl_substs.iter())
542 GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
543 !impl_generics.region_param(ebr, self).pure_wrt_drop
545 GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
546 !impl_generics.type_param(pt, self).pure_wrt_drop
548 GenericArgKind::Const(&ty::Const {
549 val: ty::ConstKind::Param(ref pc), ..
550 }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
551 GenericArgKind::Lifetime(_)
552 | GenericArgKind::Type(_)
553 | GenericArgKind::Const(_) => {
554 // Not a type, const or region param: this should be reported
// Keep the ADT-side argument from each surviving pair.
560 .map(|(&item_param, _)| item_param)
562 debug!("destructor_constraint({:?}) = {:?}", def.did, result);
566 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
567 /// that closures have a `DefId`, but the closure *expression* also
568 /// has a `HirId` that is located within the context where the
569 /// closure appears (and, sadly, a corresponding `NodeId`, since
570 /// those are not yet phased out). The parent of the closure's
571 /// `DefId` will also be the context where it appears.
572 pub fn is_closure(self, def_id: DefId) -> bool {
573 self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr
576 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
577 pub fn is_trait(self, def_id: DefId) -> bool {
578 self.def_kind(def_id) == Some(DefKind::Trait)
581 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
582 /// and `false` otherwise.
583 pub fn is_trait_alias(self, def_id: DefId) -> bool {
584 self.def_kind(def_id) == Some(DefKind::TraitAlias)
587 /// Returns `true` if this `DefId` refers to the implicit constructor for
588 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
589 pub fn is_constructor(self, def_id: DefId) -> bool {
590 self.def_key(def_id).disambiguated_data.data == DefPathData::Ctor
593 /// Given the def-ID of a fn or closure, returns the def-ID of
594 /// the innermost fn item that the closure is contained within.
595 /// This is a significant `DefId` because, when we do
596 /// type-checking, we type-check this fn item and all of its
597 /// (transitive) closures together. Therefore, when we fetch the
598 /// `typeck_tables_of` the closure, for example, we really wind up
599 /// fetching the `typeck_tables_of` the enclosing fn item.
600 pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
601 let mut def_id = def_id;
// Walk up the def-path parent chain until we leave closure territory.
602 while self.is_closure(def_id) {
603 def_id = self.parent(def_id).unwrap_or_else(|| {
604 bug!("closure {:?} has no parent", def_id);
610 /// Given the `DefId` and substs a closure, creates the type of
611 /// `self` argument that the closure expects. For example, for a
612 /// `Fn` closure, this would return a reference type `&T` where
613 /// `T = closure_ty`.
615 /// Returns `None` if this closure's kind has not yet been inferred.
616 /// This should only be possible during type checking.
618 /// Note that the return value is a late-bound region and hence
619 /// wrapped in a binder.
620 pub fn closure_env_ty(
622 closure_def_id: DefId,
623 closure_substs: SubstsRef<'tcx>,
624 ) -> Option<ty::Binder<Ty<'tcx>>> {
625 let closure_ty = self.mk_closure(closure_def_id, closure_substs);
// The environment borrow uses a fresh late-bound region (`BrEnv`).
626 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
627 let closure_kind_ty = closure_substs.as_closure().kind_ty(closure_def_id, self);
// `?`: kind not yet inferred -> `None` (only possible during type checking).
628 let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
// `Fn` -> `&T`, `FnMut` -> `&mut T`, `FnOnce` -> `T` (by value).
629 let env_ty = match closure_kind {
630 ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
631 ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
632 ty::ClosureKind::FnOnce => closure_ty,
634 Some(ty::Binder::bind(env_ty))
637 /// Given the `DefId` of some item that has no type or const parameters, make
638 /// a suitable "empty substs" for it.
639 pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> {
// Only lifetimes can be present; they are all erased. Any type or const
// parameter contradicts the caller's promise and is an ICE.
640 InternalSubsts::for_item(self, item_def_id, |param, _| match param.kind {
641 GenericParamDefKind::Lifetime => self.lifetimes.re_erased.into(),
642 GenericParamDefKind::Type { .. } => {
643 bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
645 GenericParamDefKind::Const { .. } => {
646 bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id)
651 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
652 pub fn is_static(&self, def_id: DefId) -> bool {
653 self.static_mutability(def_id).is_some()
656 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
657 pub fn is_mutable_static(&self, def_id: DefId) -> bool {
658 self.static_mutability(def_id) == Some(hir::Mutability::Mut)
661 /// Get the type of the pointer to the static that we use in MIR.
662 pub fn static_ptr_ty(&self, def_id: DefId) -> Ty<'tcx> {
663 // Make sure that any constants in the static's type are evaluated.
664 let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
// Mutable statics and foreign statics are accessed via raw pointers
// (shared references would assert immutability/dereferenceability we
// cannot guarantee); ordinary statics get an erased-lifetime `&T`.
666 if self.is_mutable_static(def_id) {
667 self.mk_mut_ptr(static_ty)
668 } else if self.is_foreign_item(def_id) {
669 self.mk_imm_ptr(static_ty)
671 self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
675 /// Expands the given impl trait type, stopping if the type is recursive.
// On success returns the fully expanded type; on recursion returns `Err` with
// the partially expanded type so the caller can still report something useful.
676 pub fn try_expand_impl_trait_type(
679 substs: SubstsRef<'tcx>,
680 ) -> Result<Ty<'tcx>, Ty<'tcx>> {
681 use crate::ty::fold::TypeFolder;
// Folder that replaces `ty::Opaque` types by their concrete definitions,
// tracking the in-progress set to detect cycles.
683 struct OpaqueTypeExpander<'tcx> {
684 // Contains the DefIds of the opaque types that are currently being
685 // expanded. When we expand an opaque type we insert the DefId of
686 // that type, and when we finish expanding that type we remove the
688 seen_opaque_tys: FxHashSet<DefId>,
689 // Cache of all expansions we've seen so far. This is a critical
690 // optimization for some large types produced by async fn trees.
691 expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
692 primary_def_id: DefId,
693 found_recursion: bool,
697 impl<'tcx> OpaqueTypeExpander<'tcx> {
701 substs: SubstsRef<'tcx>,
702 ) -> Option<Ty<'tcx>> {
// Once recursion is detected, stop expanding entirely.
703 if self.found_recursion {
// Fold the substs first so nested opaque types inside them expand too.
706 let substs = substs.fold_with(self);
707 if self.seen_opaque_tys.insert(def_id) {
// Cache hit avoids re-expanding large async-fn trees.
708 let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
709 Some(expanded_ty) => expanded_ty,
710 let generic_ty = self.tcx.type_of(def_id);
712 let concrete_ty = generic_ty.subst(self.tcx, substs);
713 let expanded_ty = self.fold_ty(concrete_ty);
714 self.expanded_cache.insert((def_id, substs), expanded_ty);
// Done with this opaque type; allow it to appear again in sibling positions.
718 self.seen_opaque_tys.remove(&def_id);
721 // If another opaque type that we contain is recursive, then it
722 // will report the error, so we don't have to.
723 self.found_recursion = def_id == self.primary_def_id;
729 impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
730 fn tcx(&self) -> TyCtxt<'tcx> {
734 fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
735 if let ty::Opaque(def_id, substs) = t.kind {
736 self.expand_opaque_ty(def_id, substs).unwrap_or(t)
// Only recurse into types that can still contain projections/opaque types.
737 } else if t.has_projections() {
738 t.super_fold_with(self)
745 let mut visitor = OpaqueTypeExpander {
746 seen_opaque_tys: FxHashSet::default(),
747 expanded_cache: FxHashMap::default(),
748 primary_def_id: def_id,
749 found_recursion: false,
// The top-level expansion always yields `Some` (the set starts empty).
752 let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
753 if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
757 impl<'tcx> ty::TyS<'tcx> {
758 /// Checks whether values of this type `T` are *moved* or *copied*
759 /// when referenced -- this amounts to a check for whether `T:
760 /// Copy`, but note that we **don't** consider lifetimes when
761 /// doing this check. This means that we may generate MIR which
762 /// does copies even when the type actually doesn't satisfy the
763 /// full requirements for the `Copy` trait (cc #29149) -- this
764 /// winds up being reported as an error during NLL borrow check.
765 pub fn is_copy_modulo_regions(
768 param_env: ty::ParamEnv<'tcx>,
// Delegates to the `is_copy_raw` query, keyed by (param_env, ty).
771 tcx.at(span).is_copy_raw(param_env.and(self))
774 /// Checks whether values of this type `T` have a size known at
775 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
776 /// for the purposes of this check, so it can be an
777 /// over-approximation in generic contexts, where one can have
778 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
779 /// actually carry lifetime requirements.
780 pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
781 tcx_at.is_sized_raw(param_env.and(self))
784 /// Checks whether values of this type `T` implement the `Freeze`
785 /// trait -- frozen types are those that do not contain a
786 /// `UnsafeCell` anywhere. This is a language concept used to
787 /// distinguish "true immutability", which is relevant to
788 /// optimization as well as the rules around static values. Note
789 /// that the `Freeze` trait is not exposed to end users and is
790 /// effectively an implementation detail.
794 param_env: ty::ParamEnv<'tcx>,
797 tcx.at(span).is_freeze_raw(param_env.and(self))
800 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
801 /// non-copy and *might* have a destructor attached; if it returns
802 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
804 /// (Note that this implies that if `ty` has a destructor attached,
805 /// then `needs_drop` will definitely return `true` for `ty`.)
807 /// Note that this method is used to check eligible types in unions.
809 pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
810 tcx.needs_drop_raw(param_env.and(self)).0
// Structural same-definition check used by the representability walk below:
// two ADTs of the same `DefId` match if their type arguments match recursively.
813 pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
814 match (&a.kind, &b.kind) {
815 (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
// Compare only the type arguments, pairwise and recursively.
820 substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
826 /// Check whether a type is representable. This means it cannot contain unboxed
827 /// structural recursion. This check is needed for structs and enums.
828 pub fn is_representable(&'tcx self, tcx: TyCtxt<'tcx>, sp: Span) -> Representability {
829 // Iterate until something non-representable is found
// Combine child results; two `SelfRecursive` results merge their span lists,
// otherwise `cmp::max` keeps the "more erroneous" value (see enum ordering).
830 fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
831 iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
832 (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
833 Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
835 (r1, r2) => cmp::max(r1, r2),
// Recurse into the components of `ty` (tuple elements, array element, ADT fields).
839 fn are_inner_types_recursive<'tcx>(
842 seen: &mut Vec<Ty<'tcx>>,
843 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
845 ) -> Representability {
848 // Find non representable
849 fold_repr(ty.tuple_fields().map(|ty| {
850 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
853 // Fixed-length vectors.
854 // FIXME(#11924) Behavior undecided for zero-length vectors.
856 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
858 Adt(def, substs) => {
859 // Find non representable fields with their spans
860 fold_repr(def.all_fields().map(|field| {
861 let ty = field.ty(tcx, substs);
862 let span = tcx.hir().span_if_local(field.did).unwrap_or(sp);
863 match is_type_structurally_recursive(
// Re-point the recursion at the offending field's own span for diagnostics.
870 Representability::SelfRecursive(_) => {
871 Representability::SelfRecursive(vec![span])
878 // this check is run on type definitions, so we don't expect
879 // to see closure types
880 bug!("requires check invoked on inapplicable type: {:?}", ty)
// Pointers, references, and everything else break the recursion chain.
882 _ => Representability::Representable,
// `true` when `ty` is an instance of exactly the ADT `def` (ignoring substs).
886 fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
888 Adt(ty_def, _) => ty_def == def,
893 // Does the type `ty` directly (without indirection through a pointer)
894 // contain any types on stack `seen`?
// Memoizing wrapper around `is_type_structurally_recursive_inner`.
895 fn is_type_structurally_recursive<'tcx>(
898 seen: &mut Vec<Ty<'tcx>>,
899 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
901 ) -> Representability {
902 debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
903 if let Some(representability) = representable_cache.get(ty) {
905 "is_type_structurally_recursive: {:?} {:?} - (cached) {:?}",
906 ty, sp, representability
908 return representability.clone();
911 let representability =
912 is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
914 representable_cache.insert(ty, representability.clone());
918 fn is_type_structurally_recursive_inner<'tcx>(
921 seen: &mut Vec<Ty<'tcx>>,
922 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
924 ) -> Representability {
928 // Iterate through stack of previously seen types.
929 let mut iter = seen.iter();
931 // The first item in `seen` is the type we are actually curious about.
932 // We want to return SelfRecursive if this type contains itself.
933 // It is important that we DON'T take generic parameters into account
934 // for this check, so that Bar<T> in this example counts as SelfRecursive:
937 // struct Bar<T> { x: Bar<Foo> }
939 if let Some(&seen_type) = iter.next() {
940 if same_struct_or_enum(seen_type, def) {
941 debug!("SelfRecursive: {:?} contains {:?}", seen_type, ty);
942 return Representability::SelfRecursive(vec![sp]);
946 // We also need to know whether the first item contains other types
947 // that are structurally recursive. If we don't catch this case, we
948 // will recurse infinitely for some inputs.
950 // It is important that we DO take generic parameters into account
951 // here, so that code like this is considered SelfRecursive, not
952 // ContainsRecursive:
954 // struct Foo { Option<Option<Foo>> }
956 for &seen_type in iter {
957 if ty::TyS::same_type(ty, seen_type) {
958 debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty);
959 return Representability::ContainsRecursive;
964 // For structs and enums, track all previously seen types by pushing them
965 // onto the 'seen' stack.
967 let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
972 // No need to push in other cases.
973 are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)
978 debug!("is_type_representable: {:?}", self);
980 // To avoid a stack overflow when checking an enum variant or struct that
981 // contains a different, structurally recursive type, maintain a stack
982 // of seen types and check recursion for each of them (issues #3008, #3779).
983 let mut seen: Vec<Ty<'_>> = Vec::new();
984 let mut representable_cache = FxHashMap::default();
985 let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, self);
986 debug!("is_type_representable: {:?} is {:?}", self, r);
990 /// Peel off all reference types in this type until there are none left.
992 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
997 /// - `&'a mut u8` -> `u8`
998 /// - `&'a &'b u8` -> `u8`
999 /// - `&'a *const &'b u8 -> *const &'b u8`
1000 pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
// Only `Ref` is peeled; raw pointers and boxes are left intact (see doc examples).
1002 while let Ref(_, inner_ty, _) = ty.kind {
// Query providers for the `is_copy_raw` / `is_sized_raw` / `is_freeze_raw`
// queries: each is a thin wrapper mapping its lang-item trait into `is_item_raw`.
1009 fn is_copy_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
1010 is_item_raw(tcx, query, lang_items::CopyTraitLangItem)
1013 fn is_sized_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
1014 is_item_raw(tcx, query, lang_items::SizedTraitLangItem)
1017 fn is_freeze_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
1018 is_item_raw(tcx, query, lang_items::FreezeTraitLangItem)
// Shared implementation: checks `ty: <lang-item trait>` modulo regions,
// inside a fresh inference context.
1021 fn is_item_raw<'tcx>(
1023 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1024 item: lang_items::LangItem,
1026 let (param_env, ty) = query.into_parts();
// Panics (via `require_lang_item`) if the lang item is missing.
1027 let trait_def_id = tcx.require_lang_item(item, None);
1028 tcx.infer_ctxt().enter(|infcx| {
1029 traits::type_known_to_meet_bound_modulo_regions(
// Newtype wrapper for the boolean result of the `needs_drop_raw` query;
// `HashStable` is required so the query result can participate in dep-tracking.
1039 #[derive(Clone, HashStable)]
1040 pub struct NeedsDrop(pub bool);
// Query provider: conservatively decides whether dropping a value of `ty`
// may run any destructor (drop glue).
1042 fn needs_drop_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> NeedsDrop {
1043 let (param_env, ty) = query.into_parts();
// Recursive helper re-entering the query for component types.
1045 let needs_drop = |ty: Ty<'tcx>| -> bool { tcx.needs_drop_raw(param_env.and(ty)).0 };
1047 assert!(!ty.needs_infer());
1049 NeedsDrop(match ty.kind {
1050 // Fast-path for primitive types
1051 ty::Infer(ty::FreshIntTy(_))
1052 | ty::Infer(ty::FreshFloatTy(_))
1061 | ty::GeneratorWitness(..)
1066 // Foreign types can never have destructors
1067 ty::Foreign(..) => false,
1069 // `ManuallyDrop` doesn't have a destructor regardless of field types.
1070 ty::Adt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false,
1072 // Issue #22536: We first query `is_copy_modulo_regions`. It sees a
1073 // normalized version of the type, and therefore will definitely
1074 // know whether the type implements Copy (and thus needs no
1075 // cleanup/drop/zeroing) ...
1076 _ if ty.is_copy_modulo_regions(tcx, param_env, DUMMY_SP) => false,
1078 // ... (issue #22536 continued) but as an optimization, still use
1079 // prior logic of asking for the structural "may drop".
1081 // FIXME(#22815): Note that this is a conservative heuristic;
1082 // it may report that the type "may drop" when actual type does
1083 // not actually have a destructor associated with it. But since
1084 // the type absolutely did not have the `Copy` bound attached
1085 // (see above), it is sound to treat it as having a destructor.
1087 // User destructors are the only way to have concrete drop types.
1088 ty::Adt(def, _) if def.has_dtor(tcx) => true,
1090 // Can refer to a type which may drop.
1091 // FIXME(eddyb) check this against a ParamEnv.
1093 | ty::Projection(..)
1096 | ty::Placeholder(..)
1099 | ty::Error => true,
1101 ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"),
1103 // Zero-length arrays never contain anything to drop.
1104 ty::Array(_, len) if len.try_eval_usize(tcx, param_env) == Some(0) => false,
1106 // Structural recursion.
1107 ty::Array(ty, _) | ty::Slice(ty) => needs_drop(ty),
// A closure needs drop iff any of its captured upvars does.
1109 ty::Closure(def_id, ref substs) => {
1110 substs.as_closure().upvar_tys(def_id, tcx).any(needs_drop)
1113 // Pessimistically assume that all generators will require destructors
1114 // as we don't know if a destructor is a noop or not until after the MIR
1115 // state transformation pass
1116 ty::Generator(..) => true,
1118 ty::Tuple(..) => ty.tuple_fields().any(needs_drop),
1120 // unions don't have destructors because of the child types,
1121 // only if they manually implement `Drop` (handled above).
1122 ty::Adt(def, _) if def.is_union() => false,
// Other ADTs: needs drop iff any field of any variant does.
1124 ty::Adt(def, substs) => def
1127 .any(|variant| variant.fields.iter().any(|field| needs_drop(field.ty(tcx, substs)))),
// Classification of an explicit `self: SomeType` receiver declaration.
1131 pub enum ExplicitSelf<'tcx> {
1133 ByReference(ty::Region<'tcx>, hir::Mutability),
1134 ByRawPointer(hir::Mutability),
1139 impl<'tcx> ExplicitSelf<'tcx> {
1140 /// Categorizes an explicit self declaration like `self: SomeType`
1141 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
1143 /// This is mainly used to require the arbitrary_self_types feature
1144 /// in the case of `Other`, to improve error messages in the common cases,
1145 /// and to make `Other` non-object-safe.
1150 /// impl<'a> Foo for &'a T {
1151 /// // Legal declarations:
1152 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
1153 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
1154 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
1155 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
1157 /// // Invalid cases will be caught by `check_method_receiver`:
1158 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
1159 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
1160 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
// `is_self_ty` decides whether a given type "is" the impl's `Self` type; the
// match arms below are ordered so the most direct classification wins first.
1164 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
1166 P: Fn(Ty<'tcx>) -> bool,
1168 use self::ExplicitSelf::*;
1170 match self_arg_ty.kind {
1171 _ if is_self_ty(self_arg_ty) => ByValue,
1172 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
1173 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
1174 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
// Registers this module's query providers (the `*_raw` functions above)
// with the global query-provider table.
1180 pub fn provide(providers: &mut ty::query::Providers<'_>) {
1181 *providers = ty::query::Providers {