1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
use crate::hir::map::DefPathData;
use crate::ich::NodeIdHashingMode;
use crate::mir::interpret::{sign_extend, truncate};
use crate::ty::layout::{Integer, IntegerExt, Size};
use crate::ty::query::TyCtxtAt;
use crate::ty::subst::{GenericArgKind, InternalSubsts, Subst, SubstsRef};
use crate::ty::TyKind::*;
use crate::ty::{self, DefIdTree, GenericParamDefKind, Ty, TyCtxt, TypeFoldable};
use crate::util::common::ErrorReported;
use rustc_apfloat::Float as _;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_span::Span;
use std::{cmp, fmt};
use syntax::ast;
use syntax::attr::{self, SignedInt, UnsignedInt};
24 #[derive(Copy, Clone, Debug)]
25 pub struct Discr<'tcx> {
26 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
31 impl<'tcx> fmt::Display for Discr<'tcx> {
32 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
35 let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
37 // sign extend the raw representation to be an i128
38 let x = sign_extend(x, size) as i128;
41 _ => write!(fmt, "{}", self.val),
46 fn signed_min(size: Size) -> i128 {
47 sign_extend(1_u128 << (size.bits() - 1), size) as i128
50 fn signed_max(size: Size) -> i128 {
51 i128::max_value() >> (128 - size.bits())
54 fn unsigned_max(size: Size) -> u128 {
55 u128::max_value() >> (128 - size.bits())
58 fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
59 let (int, signed) = match ty.kind {
60 Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
61 Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
62 _ => bug!("non integer discriminant"),
67 impl<'tcx> Discr<'tcx> {
68 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
69 pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
70 self.checked_add(tcx, 1).0
72 pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
73 let (size, signed) = int_size_and_signed(tcx, self.ty);
74 let (val, oflo) = if signed {
75 let min = signed_min(size);
76 let max = signed_max(size);
77 let val = sign_extend(self.val, size) as i128;
78 assert!(n < (i128::max_value() as u128));
80 let oflo = val > max - n;
81 let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
82 // zero the upper bits
83 let val = val as u128;
84 let val = truncate(val, size);
87 let max = unsigned_max(size);
89 let oflo = val > max - n;
90 let val = if oflo { n - (max - val) - 1 } else { val + n };
93 (Self { val, ty: self.ty }, oflo)
97 pub trait IntTypeExt {
98 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
99 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
100 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
103 impl IntTypeExt for attr::IntType {
104 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
106 SignedInt(ast::IntTy::I8) => tcx.types.i8,
107 SignedInt(ast::IntTy::I16) => tcx.types.i16,
108 SignedInt(ast::IntTy::I32) => tcx.types.i32,
109 SignedInt(ast::IntTy::I64) => tcx.types.i64,
110 SignedInt(ast::IntTy::I128) => tcx.types.i128,
111 SignedInt(ast::IntTy::Isize) => tcx.types.isize,
112 UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
113 UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
114 UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
115 UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
116 UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
117 UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
121 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
122 Discr { val: 0, ty: self.to_ty(tcx) }
125 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
126 if let Some(val) = val {
127 assert_eq!(self.to_ty(tcx), val.ty);
128 let (new, oflo) = val.checked_add(tcx, 1);
129 if oflo { None } else { Some(new) }
131 Some(self.initial_discriminant(tcx))
136 /// Describes whether a type is representable. For types that are not
137 /// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
138 /// distinguish between types that are recursive with themselves and types that
139 /// contain a different recursive type. These cases can therefore be treated
140 /// differently when reporting errors.
142 /// The ordering of the cases is significant. They are sorted so that cmp::max
143 /// will keep the "more erroneous" of two values.
144 #[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
145 pub enum Representability {
148 SelfRecursive(Vec<Span>),
151 impl<'tcx> TyCtxt<'tcx> {
152 /// Creates a hash of the type `Ty` which will be the same no matter what crate
153 /// context it's calculated within. This is used by the `type_id` intrinsic.
154 pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
155 let mut hasher = StableHasher::new();
156 let mut hcx = self.create_stable_hashing_context();
158 // We want the type_id be independent of the types free regions, so we
159 // erase them. The erase_regions() call will also anonymize bound
160 // regions, which is desirable too.
161 let ty = self.erase_regions(&ty);
163 hcx.while_hashing_spans(false, |hcx| {
164 hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
165 ty.hash_stable(hcx, &mut hasher);
impl<'tcx> TyCtxt<'tcx> {
    /// Returns `true` if `ty` is an ADT with at least one field whose type
    /// is the `Error` type (i.e., a type error was already reported).
    pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
        if let ty::Adt(def, substs) = ty.kind {
            for field in def.all_fields() {
                let field_ty = field.ty(self, substs);
                if let Error = field_ty.kind {
                    return true;
                }
            }
        }
        false
    }
185 /// Attempts to returns the deeply last field of nested structures, but
186 /// does not apply any normalization in its search. Returns the same type
187 /// if input `ty` is not a structure at all.
188 pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
190 tcx.struct_tail_with_normalize(ty, |ty| ty)
193 /// Returns the deeply last field of nested structures, or the same type if
194 /// not a structure at all. Corresponds to the only possible unsized field,
195 /// and its type can be used to determine unsizing strategy.
197 /// Should only be called if `ty` has no inference variables and does not
198 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
199 /// normalization attempt may cause compiler bugs.
200 pub fn struct_tail_erasing_lifetimes(
203 param_env: ty::ParamEnv<'tcx>,
206 tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
209 /// Returns the deeply last field of nested structures, or the same type if
210 /// not a structure at all. Corresponds to the only possible unsized field,
211 /// and its type can be used to determine unsizing strategy.
213 /// This is parameterized over the normalization strategy (i.e. how to
214 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
215 /// function to indicate no normalization should take place.
217 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
219 pub fn struct_tail_with_normalize(
222 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
226 ty::Adt(def, substs) => {
227 if !def.is_struct() {
230 match def.non_enum_variant().fields.last() {
231 Some(f) => ty = f.ty(self, substs),
237 if let Some((&last_ty, _)) = tys.split_last() {
238 ty = last_ty.expect_ty();
244 ty::Projection(_) | ty::Opaque(..) => {
245 let normalized = normalize(ty);
246 if ty == normalized {
261 /// Same as applying `struct_tail` on `source` and `target`, but only
262 /// keeps going as long as the two types are instances of the same
263 /// structure definitions.
264 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
265 /// whereas struct_tail produces `T`, and `Trait`, respectively.
267 /// Should only be called if the types have no inference variables and do
268 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
269 /// normalization attempt may cause compiler bugs.
270 pub fn struct_lockstep_tails_erasing_lifetimes(
274 param_env: ty::ParamEnv<'tcx>,
275 ) -> (Ty<'tcx>, Ty<'tcx>) {
277 tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
278 tcx.normalize_erasing_regions(param_env, ty)
282 /// Same as applying `struct_tail` on `source` and `target`, but only
283 /// keeps going as long as the two types are instances of the same
284 /// structure definitions.
285 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
286 /// whereas struct_tail produces `T`, and `Trait`, respectively.
288 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
290 pub fn struct_lockstep_tails_with_normalize(
294 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
295 ) -> (Ty<'tcx>, Ty<'tcx>) {
296 let (mut a, mut b) = (source, target);
298 match (&a.kind, &b.kind) {
299 (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
300 if a_def == b_def && a_def.is_struct() =>
302 if let Some(f) = a_def.non_enum_variant().fields.last() {
303 a = f.ty(self, a_substs);
304 b = f.ty(self, b_substs);
309 (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
310 if let Some(a_last) = a_tys.last() {
311 a = a_last.expect_ty();
312 b = b_tys.last().unwrap().expect_ty();
317 (ty::Projection(_), _)
318 | (ty::Opaque(..), _)
319 | (_, ty::Projection(_))
320 | (_, ty::Opaque(..)) => {
321 // If either side is a projection, attempt to
322 // progress via normalization. (Should be safe to
323 // apply to both sides as normalization is
325 let a_norm = normalize(a);
326 let b_norm = normalize(b);
327 if a == a_norm && b == b_norm {
341 /// Calculate the destructor of a given type.
342 pub fn calculate_dtor(
345 validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported>,
346 ) -> Option<ty::Destructor> {
347 let drop_trait = if let Some(def_id) = self.lang_items().drop_trait() {
353 self.ensure().coherent_trait(drop_trait);
355 let mut dtor_did = None;
356 let ty = self.type_of(adt_did);
357 self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
358 if let Some(item) = self.associated_items(impl_did).next() {
359 if validate(self, impl_did).is_ok() {
360 dtor_did = Some(item.def_id);
365 Some(ty::Destructor { did: dtor_did? })
368 /// Returns the set of types that are required to be alive in
369 /// order to run the destructor of `def` (see RFCs 769 and
372 /// Note that this returns only the constraints for the
373 /// destructor of `def` itself. For the destructors of the
374 /// contents, you need `adt_dtorck_constraint`.
375 pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
376 let dtor = match def.destructor(self) {
378 debug!("destructor_constraints({:?}) - no dtor", def.did);
381 Some(dtor) => dtor.did,
384 let impl_def_id = self.associated_item(dtor).container.id();
385 let impl_generics = self.generics_of(impl_def_id);
387 // We have a destructor - all the parameters that are not
388 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
391 // We need to return the list of parameters from the ADTs
392 // generics/substs that correspond to impure parameters on the
393 // impl's generics. This is a bit ugly, but conceptually simple:
395 // Suppose our ADT looks like the following
397 // struct S<X, Y, Z>(X, Y, Z);
401 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
403 // We want to return the parameters (X, Y). For that, we match
404 // up the item-substs <X, Y, Z> with the substs on the impl ADT,
405 // <P1, P2, P0>, and then look up which of the impl substs refer to
406 // parameters marked as pure.
408 let impl_substs = match self.type_of(impl_def_id).kind {
409 ty::Adt(def_, substs) if def_ == def => substs,
413 let item_substs = match self.type_of(def.did).kind {
414 ty::Adt(def_, substs) if def_ == def => substs,
418 let result = item_substs
420 .zip(impl_substs.iter())
423 GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
424 !impl_generics.region_param(ebr, self).pure_wrt_drop
426 GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
427 !impl_generics.type_param(pt, self).pure_wrt_drop
429 GenericArgKind::Const(&ty::Const {
430 val: ty::ConstKind::Param(ref pc), ..
431 }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
432 GenericArgKind::Lifetime(_)
433 | GenericArgKind::Type(_)
434 | GenericArgKind::Const(_) => {
435 // Not a type, const or region param: this should be reported
441 .map(|(&item_param, _)| item_param)
443 debug!("destructor_constraint({:?}) = {:?}", def.did, result);
447 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
448 /// that closures have a `DefId`, but the closure *expression* also
449 /// has a `HirId` that is located within the context where the
450 /// closure appears (and, sadly, a corresponding `NodeId`, since
451 /// those are not yet phased out). The parent of the closure's
452 /// `DefId` will also be the context where it appears.
453 pub fn is_closure(self, def_id: DefId) -> bool {
454 self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr
457 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
458 pub fn is_trait(self, def_id: DefId) -> bool {
459 self.def_kind(def_id) == Some(DefKind::Trait)
462 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
463 /// and `false` otherwise.
464 pub fn is_trait_alias(self, def_id: DefId) -> bool {
465 self.def_kind(def_id) == Some(DefKind::TraitAlias)
468 /// Returns `true` if this `DefId` refers to the implicit constructor for
469 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
470 pub fn is_constructor(self, def_id: DefId) -> bool {
471 self.def_key(def_id).disambiguated_data.data == DefPathData::Ctor
474 /// Given the def-ID of a fn or closure, returns the def-ID of
475 /// the innermost fn item that the closure is contained within.
476 /// This is a significant `DefId` because, when we do
477 /// type-checking, we type-check this fn item and all of its
478 /// (transitive) closures together. Therefore, when we fetch the
479 /// `typeck_tables_of` the closure, for example, we really wind up
480 /// fetching the `typeck_tables_of` the enclosing fn item.
481 pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
482 let mut def_id = def_id;
483 while self.is_closure(def_id) {
484 def_id = self.parent(def_id).unwrap_or_else(|| {
485 bug!("closure {:?} has no parent", def_id);
491 /// Given the `DefId` and substs a closure, creates the type of
492 /// `self` argument that the closure expects. For example, for a
493 /// `Fn` closure, this would return a reference type `&T` where
494 /// `T = closure_ty`.
496 /// Returns `None` if this closure's kind has not yet been inferred.
497 /// This should only be possible during type checking.
499 /// Note that the return value is a late-bound region and hence
500 /// wrapped in a binder.
501 pub fn closure_env_ty(
503 closure_def_id: DefId,
504 closure_substs: SubstsRef<'tcx>,
505 ) -> Option<ty::Binder<Ty<'tcx>>> {
506 let closure_ty = self.mk_closure(closure_def_id, closure_substs);
507 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
508 let closure_kind_ty = closure_substs.as_closure().kind_ty(closure_def_id, self);
509 let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
510 let env_ty = match closure_kind {
511 ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
512 ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
513 ty::ClosureKind::FnOnce => closure_ty,
515 Some(ty::Binder::bind(env_ty))
518 /// Given the `DefId` of some item that has no type or const parameters, make
519 /// a suitable "empty substs" for it.
520 pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> {
521 InternalSubsts::for_item(self, item_def_id, |param, _| match param.kind {
522 GenericParamDefKind::Lifetime => self.lifetimes.re_erased.into(),
523 GenericParamDefKind::Type { .. } => {
524 bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
526 GenericParamDefKind::Const { .. } => {
527 bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id)
532 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
533 pub fn is_static(&self, def_id: DefId) -> bool {
534 self.static_mutability(def_id).is_some()
537 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
538 pub fn is_mutable_static(&self, def_id: DefId) -> bool {
539 self.static_mutability(def_id) == Some(hir::Mutability::Mut)
542 /// Get the type of the pointer to the static that we use in MIR.
543 pub fn static_ptr_ty(&self, def_id: DefId) -> Ty<'tcx> {
544 // Make sure that any constants in the static's type are evaluated.
545 let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
547 if self.is_mutable_static(def_id) {
548 self.mk_mut_ptr(static_ty)
550 self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
554 /// Expands the given impl trait type, stopping if the type is recursive.
555 pub fn try_expand_impl_trait_type(
558 substs: SubstsRef<'tcx>,
559 ) -> Result<Ty<'tcx>, Ty<'tcx>> {
560 use crate::ty::fold::TypeFolder;
562 struct OpaqueTypeExpander<'tcx> {
563 // Contains the DefIds of the opaque types that are currently being
564 // expanded. When we expand an opaque type we insert the DefId of
565 // that type, and when we finish expanding that type we remove the
567 seen_opaque_tys: FxHashSet<DefId>,
568 // Cache of all expansions we've seen so far. This is a critical
569 // optimization for some large types produced by async fn trees.
570 expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
571 primary_def_id: DefId,
572 found_recursion: bool,
576 impl<'tcx> OpaqueTypeExpander<'tcx> {
580 substs: SubstsRef<'tcx>,
581 ) -> Option<Ty<'tcx>> {
582 if self.found_recursion {
585 let substs = substs.fold_with(self);
586 if self.seen_opaque_tys.insert(def_id) {
587 let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
588 Some(expanded_ty) => expanded_ty,
590 let generic_ty = self.tcx.type_of(def_id);
591 let concrete_ty = generic_ty.subst(self.tcx, substs);
592 let expanded_ty = self.fold_ty(concrete_ty);
593 self.expanded_cache.insert((def_id, substs), expanded_ty);
597 self.seen_opaque_tys.remove(&def_id);
600 // If another opaque type that we contain is recursive, then it
601 // will report the error, so we don't have to.
602 self.found_recursion = def_id == self.primary_def_id;
608 impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
609 fn tcx(&self) -> TyCtxt<'tcx> {
613 fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
614 if let ty::Opaque(def_id, substs) = t.kind {
615 self.expand_opaque_ty(def_id, substs).unwrap_or(t)
616 } else if t.has_projections() {
617 t.super_fold_with(self)
624 let mut visitor = OpaqueTypeExpander {
625 seen_opaque_tys: FxHashSet::default(),
626 expanded_cache: FxHashMap::default(),
627 primary_def_id: def_id,
628 found_recursion: false,
631 let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
632 if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
impl<'tcx> ty::TyS<'tcx> {
    /// Returns the maximum value for the given numeric type (including `char`s)
    /// or returns `None` if the type is not numeric.
    pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
        let val = match self.kind {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = int_size_and_signed(tcx, self);
                let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
                Some(val)
            }
            ty::Char => Some(std::char::MAX as u128),
            // For floats, "max" is positive infinity (bit pattern).
            ty::Float(fty) => Some(match fty {
                ast::FloatTy::F32 => ::rustc_apfloat::ieee::Single::INFINITY.to_bits(),
                ast::FloatTy::F64 => ::rustc_apfloat::ieee::Double::INFINITY.to_bits(),
            }),
            _ => None,
        };
        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
    }
656 /// Returns the minimum value for the given numeric type (including `char`s)
657 /// or returns `None` if the type is not numeric.
658 pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
659 let val = match self.kind {
660 ty::Int(_) | ty::Uint(_) => {
661 let (size, signed) = int_size_and_signed(tcx, self);
662 let val = if signed { truncate(signed_min(size) as u128, size) } else { 0 };
666 ty::Float(fty) => Some(match fty {
667 ast::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
668 ast::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
672 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
675 /// Checks whether values of this type `T` are *moved* or *copied*
676 /// when referenced -- this amounts to a check for whether `T:
677 /// Copy`, but note that we **don't** consider lifetimes when
678 /// doing this check. This means that we may generate MIR which
679 /// does copies even when the type actually doesn't satisfy the
680 /// full requirements for the `Copy` trait (cc #29149) -- this
681 /// winds up being reported as an error during NLL borrow check.
682 pub fn is_copy_modulo_regions(
685 param_env: ty::ParamEnv<'tcx>,
688 tcx.at(span).is_copy_raw(param_env.and(self))
691 /// Checks whether values of this type `T` have a size known at
692 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
693 /// for the purposes of this check, so it can be an
694 /// over-approximation in generic contexts, where one can have
695 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
696 /// actually carry lifetime requirements.
697 pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
698 tcx_at.is_sized_raw(param_env.and(self))
701 /// Checks whether values of this type `T` implement the `Freeze`
702 /// trait -- frozen types are those that do not contain a
703 /// `UnsafeCell` anywhere. This is a language concept used to
704 /// distinguish "true immutability", which is relevant to
705 /// optimization as well as the rules around static values. Note
706 /// that the `Freeze` trait is not exposed to end users and is
707 /// effectively an implementation detail.
711 param_env: ty::ParamEnv<'tcx>,
714 tcx.at(span).is_freeze_raw(param_env.and(self))
717 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
718 /// non-copy and *might* have a destructor attached; if it returns
719 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
721 /// (Note that this implies that if `ty` has a destructor attached,
722 /// then `needs_drop` will definitely return `true` for `ty`.)
724 /// Note that this method is used to check eligible types in unions.
726 pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
727 tcx.needs_drop_raw(param_env.and(self)).0
730 pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
731 match (&a.kind, &b.kind) {
732 (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
737 substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
743 /// Check whether a type is representable. This means it cannot contain unboxed
744 /// structural recursion. This check is needed for structs and enums.
745 pub fn is_representable(&'tcx self, tcx: TyCtxt<'tcx>, sp: Span) -> Representability {
746 // Iterate until something non-representable is found
747 fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
748 iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
749 (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
750 Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
752 (r1, r2) => cmp::max(r1, r2),
756 fn are_inner_types_recursive<'tcx>(
759 seen: &mut Vec<Ty<'tcx>>,
760 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
762 ) -> Representability {
765 // Find non representable
766 fold_repr(ty.tuple_fields().map(|ty| {
767 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
770 // Fixed-length vectors.
771 // FIXME(#11924) Behavior undecided for zero-length vectors.
773 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
775 Adt(def, substs) => {
776 // Find non representable fields with their spans
777 fold_repr(def.all_fields().map(|field| {
778 let ty = field.ty(tcx, substs);
779 let span = tcx.hir().span_if_local(field.did).unwrap_or(sp);
780 match is_type_structurally_recursive(
787 Representability::SelfRecursive(_) => {
788 Representability::SelfRecursive(vec![span])
795 // this check is run on type definitions, so we don't expect
796 // to see closure types
797 bug!("requires check invoked on inapplicable type: {:?}", ty)
799 _ => Representability::Representable,
803 fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
805 Adt(ty_def, _) => ty_def == def,
810 // Does the type `ty` directly (without indirection through a pointer)
811 // contain any types on stack `seen`?
812 fn is_type_structurally_recursive<'tcx>(
815 seen: &mut Vec<Ty<'tcx>>,
816 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
818 ) -> Representability {
819 debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
820 if let Some(representability) = representable_cache.get(ty) {
822 "is_type_structurally_recursive: {:?} {:?} - (cached) {:?}",
823 ty, sp, representability
825 return representability.clone();
828 let representability =
829 is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
831 representable_cache.insert(ty, representability.clone());
835 fn is_type_structurally_recursive_inner<'tcx>(
838 seen: &mut Vec<Ty<'tcx>>,
839 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
841 ) -> Representability {
845 // Iterate through stack of previously seen types.
846 let mut iter = seen.iter();
848 // The first item in `seen` is the type we are actually curious about.
849 // We want to return SelfRecursive if this type contains itself.
850 // It is important that we DON'T take generic parameters into account
851 // for this check, so that Bar<T> in this example counts as SelfRecursive:
854 // struct Bar<T> { x: Bar<Foo> }
856 if let Some(&seen_type) = iter.next() {
857 if same_struct_or_enum(seen_type, def) {
858 debug!("SelfRecursive: {:?} contains {:?}", seen_type, ty);
859 return Representability::SelfRecursive(vec![sp]);
863 // We also need to know whether the first item contains other types
864 // that are structurally recursive. If we don't catch this case, we
865 // will recurse infinitely for some inputs.
867 // It is important that we DO take generic parameters into account
868 // here, so that code like this is considered SelfRecursive, not
869 // ContainsRecursive:
871 // struct Foo { Option<Option<Foo>> }
873 for &seen_type in iter {
874 if ty::TyS::same_type(ty, seen_type) {
875 debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty);
876 return Representability::ContainsRecursive;
881 // For structs and enums, track all previously seen types by pushing them
882 // onto the 'seen' stack.
884 let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
889 // No need to push in other cases.
890 are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)
895 debug!("is_type_representable: {:?}", self);
897 // To avoid a stack overflow when checking an enum variant or struct that
898 // contains a different, structurally recursive type, maintain a stack
899 // of seen types and check recursion for each of them (issues #3008, #3779).
900 let mut seen: Vec<Ty<'_>> = Vec::new();
901 let mut representable_cache = FxHashMap::default();
902 let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, self);
903 debug!("is_type_representable: {:?} is {:?}", self, r);
907 /// Peel off all reference types in this type until there are none left.
909 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
914 /// - `&'a mut u8` -> `u8`
915 /// - `&'a &'b u8` -> `u8`
916 /// - `&'a *const &'b u8 -> *const &'b u8`
917 pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
919 while let Ref(_, inner_ty, _) = ty.kind {
926 #[derive(Clone, HashStable)]
927 pub struct NeedsDrop(pub bool);
929 pub enum ExplicitSelf<'tcx> {
931 ByReference(ty::Region<'tcx>, hir::Mutability),
932 ByRawPointer(hir::Mutability),
937 impl<'tcx> ExplicitSelf<'tcx> {
938 /// Categorizes an explicit self declaration like `self: SomeType`
939 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
941 /// This is mainly used to require the arbitrary_self_types feature
942 /// in the case of `Other`, to improve error messages in the common cases,
943 /// and to make `Other` non-object-safe.
948 /// impl<'a> Foo for &'a T {
949 /// // Legal declarations:
950 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
951 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
952 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
953 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
955 /// // Invalid cases will be caught by `check_method_receiver`:
956 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
957 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
958 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
962 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
964 P: Fn(Ty<'tcx>) -> bool,
966 use self::ExplicitSelf::*;
968 match self_arg_ty.kind {
969 _ if is_self_ty(self_arg_ty) => ByValue,
970 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
971 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
972 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,