1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
3 use crate::ich::NodeIdHashingMode;
4 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
5 use crate::ty::fold::TypeFolder;
6 use crate::ty::layout::IntegerExt;
7 use crate::ty::query::TyCtxtAt;
8 use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
9 use crate::ty::TyKind::*;
10 use crate::ty::{self, DefIdTree, List, Ty, TyCtxt, TypeFoldable};
11 use rustc_apfloat::Float as _;
13 use rustc_attr::{self as attr, SignedInt, UnsignedInt};
14 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
15 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
16 use rustc_errors::ErrorReported;
18 use rustc_hir::def::DefKind;
19 use rustc_hir::def_id::DefId;
20 use rustc_macros::HashStable;
21 use rustc_span::{Span, DUMMY_SP};
22 use rustc_target::abi::{Integer, Size, TargetDataLayout};
23 use smallvec::SmallVec;
26 #[derive(Copy, Clone, Debug)]
27 pub struct Discr<'tcx> {
28 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
33 impl<'tcx> fmt::Display for Discr<'tcx> {
34 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
35 match *self.ty.kind() {
37 let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
39 // sign extend the raw representation to be an i128
40 let x = size.sign_extend(x) as i128;
43 _ => write!(fmt, "{}", self.val),
48 fn signed_min(size: Size) -> i128 {
49 size.sign_extend(1_u128 << (size.bits() - 1)) as i128
52 fn signed_max(size: Size) -> i128 {
53 i128::MAX >> (128 - size.bits())
56 fn unsigned_max(size: Size) -> u128 {
57 u128::MAX >> (128 - size.bits())
60 fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
61 let (int, signed) = match *ty.kind() {
62 Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
63 Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
64 _ => bug!("non integer discriminant"),
69 impl<'tcx> Discr<'tcx> {
70 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
71 pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
72 self.checked_add(tcx, 1).0
74 pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
75 let (size, signed) = int_size_and_signed(tcx, self.ty);
76 let (val, oflo) = if signed {
77 let min = signed_min(size);
78 let max = signed_max(size);
79 let val = size.sign_extend(self.val) as i128;
80 assert!(n < (i128::MAX as u128));
82 let oflo = val > max - n;
83 let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
84 // zero the upper bits
85 let val = val as u128;
86 let val = size.truncate(val);
89 let max = unsigned_max(size);
91 let oflo = val > max - n;
92 let val = if oflo { n - (max - val) - 1 } else { val + n };
95 (Self { val, ty: self.ty }, oflo)
99 pub trait IntTypeExt {
100 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
101 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
102 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
105 impl IntTypeExt for attr::IntType {
106 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
108 SignedInt(ast::IntTy::I8) => tcx.types.i8,
109 SignedInt(ast::IntTy::I16) => tcx.types.i16,
110 SignedInt(ast::IntTy::I32) => tcx.types.i32,
111 SignedInt(ast::IntTy::I64) => tcx.types.i64,
112 SignedInt(ast::IntTy::I128) => tcx.types.i128,
113 SignedInt(ast::IntTy::Isize) => tcx.types.isize,
114 UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
115 UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
116 UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
117 UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
118 UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
119 UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
123 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
124 Discr { val: 0, ty: self.to_ty(tcx) }
127 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
128 if let Some(val) = val {
129 assert_eq!(self.to_ty(tcx), val.ty);
130 let (new, oflo) = val.checked_add(tcx, 1);
131 if oflo { None } else { Some(new) }
133 Some(self.initial_discriminant(tcx))
138 /// Describes whether a type is representable. For types that are not
139 /// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
140 /// distinguish between types that are recursive with themselves and types that
141 /// contain a different recursive type. These cases can therefore be treated
142 /// differently when reporting errors.
144 /// The ordering of the cases is significant. They are sorted so that cmp::max
145 /// will keep the "more erroneous" of two values.
146 #[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
147 pub enum Representability {
150 SelfRecursive(Vec<Span>),
153 impl<'tcx> TyCtxt<'tcx> {
154 /// Creates a hash of the type `Ty` which will be the same no matter what crate
155 /// context it's calculated within. This is used by the `type_id` intrinsic.
156 pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
157 let mut hasher = StableHasher::new();
158 let mut hcx = self.create_stable_hashing_context();
160 // We want the type_id be independent of the types free regions, so we
161 // erase them. The erase_regions() call will also anonymize bound
162 // regions, which is desirable too.
163 let ty = self.erase_regions(ty);
165 hcx.while_hashing_spans(false, |hcx| {
166 hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
167 ty.hash_stable(hcx, &mut hasher);
173 pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
174 if let ty::Adt(def, substs) = *ty.kind() {
175 for field in def.all_fields() {
176 let field_ty = field.ty(self, substs);
177 if let Error(_) = field_ty.kind() {
185 /// Attempts to returns the deeply last field of nested structures, but
186 /// does not apply any normalization in its search. Returns the same type
187 /// if input `ty` is not a structure at all.
188 pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
190 tcx.struct_tail_with_normalize(ty, |ty| ty)
193 /// Returns the deeply last field of nested structures, or the same type if
194 /// not a structure at all. Corresponds to the only possible unsized field,
195 /// and its type can be used to determine unsizing strategy.
197 /// Should only be called if `ty` has no inference variables and does not
198 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
199 /// normalization attempt may cause compiler bugs.
200 pub fn struct_tail_erasing_lifetimes(
203 param_env: ty::ParamEnv<'tcx>,
206 tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
209 /// Returns the deeply last field of nested structures, or the same type if
210 /// not a structure at all. Corresponds to the only possible unsized field,
211 /// and its type can be used to determine unsizing strategy.
213 /// This is parameterized over the normalization strategy (i.e. how to
214 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
215 /// function to indicate no normalization should take place.
217 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
219 pub fn struct_tail_with_normalize(
222 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
224 for iteration in 0.. {
225 if !self.sess.recursion_limit().value_within_limit(iteration) {
226 return self.ty_error_with_message(
228 &format!("reached the recursion limit finding the struct tail for {}", ty),
232 ty::Adt(def, substs) => {
233 if !def.is_struct() {
236 match def.non_enum_variant().fields.last() {
237 Some(f) => ty = f.ty(self, substs),
243 if let Some((&last_ty, _)) = tys.split_last() {
244 ty = last_ty.expect_ty();
250 ty::Projection(_) | ty::Opaque(..) => {
251 let normalized = normalize(ty);
252 if ty == normalized {
267 /// Same as applying `struct_tail` on `source` and `target`, but only
268 /// keeps going as long as the two types are instances of the same
269 /// structure definitions.
270 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
271 /// whereas struct_tail produces `T`, and `Trait`, respectively.
273 /// Should only be called if the types have no inference variables and do
274 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
275 /// normalization attempt may cause compiler bugs.
276 pub fn struct_lockstep_tails_erasing_lifetimes(
280 param_env: ty::ParamEnv<'tcx>,
281 ) -> (Ty<'tcx>, Ty<'tcx>) {
283 tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
284 tcx.normalize_erasing_regions(param_env, ty)
288 /// Same as applying `struct_tail` on `source` and `target`, but only
289 /// keeps going as long as the two types are instances of the same
290 /// structure definitions.
291 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
292 /// whereas struct_tail produces `T`, and `Trait`, respectively.
294 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
296 pub fn struct_lockstep_tails_with_normalize(
300 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
301 ) -> (Ty<'tcx>, Ty<'tcx>) {
302 let (mut a, mut b) = (source, target);
304 match (&a.kind(), &b.kind()) {
305 (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
306 if a_def == b_def && a_def.is_struct() =>
308 if let Some(f) = a_def.non_enum_variant().fields.last() {
309 a = f.ty(self, a_substs);
310 b = f.ty(self, b_substs);
315 (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
316 if let Some(a_last) = a_tys.last() {
317 a = a_last.expect_ty();
318 b = b_tys.last().unwrap().expect_ty();
323 (ty::Projection(_) | ty::Opaque(..), _)
324 | (_, ty::Projection(_) | ty::Opaque(..)) => {
325 // If either side is a projection, attempt to
326 // progress via normalization. (Should be safe to
327 // apply to both sides as normalization is
329 let a_norm = normalize(a);
330 let b_norm = normalize(b);
331 if a == a_norm && b == b_norm {
345 /// Calculate the destructor of a given type.
346 pub fn calculate_dtor(
349 validate: impl Fn(Self, DefId) -> Result<(), ErrorReported>,
350 ) -> Option<ty::Destructor> {
351 let drop_trait = self.lang_items().drop_trait()?;
352 self.ensure().coherent_trait(drop_trait);
354 let ty = self.type_of(adt_did);
355 let dtor_did = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
356 if let Some(item) = self.associated_items(impl_did).in_definition_order().next() {
357 if validate(self, impl_did).is_ok() {
358 return Some(item.def_id);
364 Some(ty::Destructor { did: dtor_did? })
/// Returns the set of types that are required to be alive in
/// order to run the destructor of `def` (see RFCs 769 and
/// Note that this returns only the constraints for the
/// destructor of `def` itself. For the destructors of the
/// contents, you need `adt_dtorck_constraint`.
pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
    // No destructor at all means no parameters are constrained.
    let dtor = match def.destructor(self) {
            debug!("destructor_constraints({:?}) - no dtor", def.did);
        Some(dtor) => dtor.did,
    // The `Drop` impl that `dtor` lives in, and the impl's generics.
    let impl_def_id = self.associated_item(dtor).container.id();
    let impl_generics = self.generics_of(impl_def_id);

    // We have a destructor - all the parameters that are not
    // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)

    // We need to return the list of parameters from the ADTs
    // generics/substs that correspond to impure parameters on the
    // impl's generics. This is a bit ugly, but conceptually simple:
    //
    // Suppose our ADT looks like the following
    //
    //     struct S<X, Y, Z>(X, Y, Z);
    //
    //     impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
    //
    // We want to return the parameters (X, Y). For that, we match
    // up the item-substs <X, Y, Z> with the substs on the impl ADT,
    // <P1, P2, P0>, and then look up which of the impl substs refer to
    // parameters marked as pure.

    // The impl's self type must be this ADT applied to some substs.
    let impl_substs = match *self.type_of(impl_def_id).kind() {
        ty::Adt(def_, substs) if def_ == def => substs,
    // The ADT's own identity substs.
    let item_substs = match *self.type_of(def.did).kind() {
        ty::Adt(def_, substs) if def_ == def => substs,
    // Keep the item-level generic args whose corresponding impl-side
    // parameter is NOT `pure_wrt_drop` (i.e. has no `#[may_dangle]`).
    let result = item_substs
        .zip(impl_substs.iter())
            // Early-bound regions: look up purity on the impl generics.
            GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
                !impl_generics.region_param(ebr, self).pure_wrt_drop
            // Type parameters: same lookup on the impl generics.
            GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
                !impl_generics.type_param(pt, self).pure_wrt_drop
            // Const parameters: same lookup on the impl generics.
            GenericArgKind::Const(&ty::Const {
                val: ty::ConstKind::Param(ref pc), ..
            }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
            GenericArgKind::Lifetime(_)
            | GenericArgKind::Type(_)
            | GenericArgKind::Const(_) => {
                // Not a type, const or region param: this should be reported
        .map(|(item_param, _)| item_param)
    debug!("destructor_constraint({:?}) = {:?}", def.did, result);
446 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
447 /// that closures have a `DefId`, but the closure *expression* also
448 /// has a `HirId` that is located within the context where the
449 /// closure appears (and, sadly, a corresponding `NodeId`, since
450 /// those are not yet phased out). The parent of the closure's
451 /// `DefId` will also be the context where it appears.
452 pub fn is_closure(self, def_id: DefId) -> bool {
453 matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
456 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
457 pub fn is_trait(self, def_id: DefId) -> bool {
458 self.def_kind(def_id) == DefKind::Trait
461 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
462 /// and `false` otherwise.
463 pub fn is_trait_alias(self, def_id: DefId) -> bool {
464 self.def_kind(def_id) == DefKind::TraitAlias
467 /// Returns `true` if this `DefId` refers to the implicit constructor for
468 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
469 pub fn is_constructor(self, def_id: DefId) -> bool {
470 matches!(self.def_kind(def_id), DefKind::Ctor(..))
473 /// Given the def-ID of a fn or closure, returns the def-ID of
474 /// the innermost fn item that the closure is contained within.
475 /// This is a significant `DefId` because, when we do
476 /// type-checking, we type-check this fn item and all of its
477 /// (transitive) closures together. Therefore, when we fetch the
478 /// `typeck` the closure, for example, we really wind up
479 /// fetching the `typeck` the enclosing fn item.
480 pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
481 let mut def_id = def_id;
482 while self.is_closure(def_id) {
483 def_id = self.parent(def_id).unwrap_or_else(|| {
484 bug!("closure {:?} has no parent", def_id);
490 /// Given the `DefId` and substs a closure, creates the type of
491 /// `self` argument that the closure expects. For example, for a
492 /// `Fn` closure, this would return a reference type `&T` where
493 /// `T = closure_ty`.
495 /// Returns `None` if this closure's kind has not yet been inferred.
496 /// This should only be possible during type checking.
498 /// Note that the return value is a late-bound region and hence
499 /// wrapped in a binder.
500 pub fn closure_env_ty(
502 closure_def_id: DefId,
503 closure_substs: SubstsRef<'tcx>,
504 ) -> Option<ty::Binder<Ty<'tcx>>> {
505 let closure_ty = self.mk_closure(closure_def_id, closure_substs);
506 let br = ty::BoundRegion { kind: ty::BrEnv };
507 let env_region = ty::ReLateBound(ty::INNERMOST, br);
508 let closure_kind_ty = closure_substs.as_closure().kind_ty();
509 let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
510 let env_ty = match closure_kind {
511 ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
512 ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
513 ty::ClosureKind::FnOnce => closure_ty,
515 Some(ty::Binder::bind(env_ty))
518 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
519 pub fn is_static(self, def_id: DefId) -> bool {
520 self.static_mutability(def_id).is_some()
523 /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
524 pub fn is_thread_local_static(self, def_id: DefId) -> bool {
525 self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
528 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
529 pub fn is_mutable_static(self, def_id: DefId) -> bool {
530 self.static_mutability(def_id) == Some(hir::Mutability::Mut)
533 /// Get the type of the pointer to the static that we use in MIR.
534 pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
535 // Make sure that any constants in the static's type are evaluated.
536 let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
538 // Make sure that accesses to unsafe statics end up using raw pointers.
539 // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
540 if self.is_mutable_static(def_id) {
541 self.mk_mut_ptr(static_ty)
542 } else if self.is_foreign_item(def_id) {
543 self.mk_imm_ptr(static_ty)
545 self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
549 /// Expands the given impl trait type, stopping if the type is recursive.
550 pub fn try_expand_impl_trait_type(
553 substs: SubstsRef<'tcx>,
554 ) -> Result<Ty<'tcx>, Ty<'tcx>> {
555 let mut visitor = OpaqueTypeExpander {
556 seen_opaque_tys: FxHashSet::default(),
557 expanded_cache: FxHashMap::default(),
558 primary_def_id: Some(def_id),
559 found_recursion: false,
560 check_recursion: true,
564 let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
565 if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
569 struct OpaqueTypeExpander<'tcx> {
570 // Contains the DefIds of the opaque types that are currently being
571 // expanded. When we expand an opaque type we insert the DefId of
572 // that type, and when we finish expanding that type we remove the
574 seen_opaque_tys: FxHashSet<DefId>,
575 // Cache of all expansions we've seen so far. This is a critical
576 // optimization for some large types produced by async fn trees.
577 expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
578 primary_def_id: Option<DefId>,
579 found_recursion: bool,
580 /// Whether or not to check for recursive opaque types.
581 /// This is `true` when we're explicitly checking for opaque type
582 /// recursion, and 'false' otherwise to avoid unnecessary work.
583 check_recursion: bool,
impl<'tcx> OpaqueTypeExpander<'tcx> {
    /// Expands the opaque type `def_id` (with `substs`), returning its
    /// (recursively expanded) concrete type, or `None` once recursion
    /// has been detected.
    fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
        // Once recursion has been found, stop doing any further work.
        if self.found_recursion {
        // Fold the substs first so nested opaque types inside them expand.
        let substs = substs.fold_with(self);
        if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
            // Consult the expansion cache before recomputing.
            let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
                Some(expanded_ty) => expanded_ty,
                    // Cache miss: substitute the opaque type's underlying type
                    // and expand it recursively, then remember the result.
                    let generic_ty = self.tcx.type_of(def_id);
                    let concrete_ty = generic_ty.subst(self.tcx, substs);
                    let expanded_ty = self.fold_ty(concrete_ty);
                    self.expanded_cache.insert((def_id, substs), expanded_ty);
            if self.check_recursion {
                // Done expanding this opaque type: pop it off the "in
                // progress" set.
                self.seen_opaque_tys.remove(&def_id);
            // If another opaque type that we contain is recursive, then it
            // will report the error, so we don't have to.
            self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
617 impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
618 fn tcx(&self) -> TyCtxt<'tcx> {
622 fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
623 if let ty::Opaque(def_id, substs) = t.kind {
624 self.expand_opaque_ty(def_id, substs).unwrap_or(t)
625 } else if t.has_opaque_types() {
626 t.super_fold_with(self)
633 impl<'tcx> ty::TyS<'tcx> {
634 /// Returns the maximum value for the given numeric type (including `char`s)
635 /// or returns `None` if the type is not numeric.
636 pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
637 let val = match self.kind() {
638 ty::Int(_) | ty::Uint(_) => {
639 let (size, signed) = int_size_and_signed(tcx, self);
640 let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
643 ty::Char => Some(std::char::MAX as u128),
644 ty::Float(fty) => Some(match fty {
645 ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
646 ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
650 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
653 /// Returns the minimum value for the given numeric type (including `char`s)
654 /// or returns `None` if the type is not numeric.
655 pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
656 let val = match self.kind() {
657 ty::Int(_) | ty::Uint(_) => {
658 let (size, signed) = int_size_and_signed(tcx, self);
659 let val = if signed { size.truncate(signed_min(size) as u128) } else { 0 };
663 ty::Float(fty) => Some(match fty {
664 ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
665 ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
669 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
672 /// Checks whether values of this type `T` are *moved* or *copied*
673 /// when referenced -- this amounts to a check for whether `T:
674 /// Copy`, but note that we **don't** consider lifetimes when
675 /// doing this check. This means that we may generate MIR which
676 /// does copies even when the type actually doesn't satisfy the
677 /// full requirements for the `Copy` trait (cc #29149) -- this
678 /// winds up being reported as an error during NLL borrow check.
679 pub fn is_copy_modulo_regions(
681 tcx_at: TyCtxtAt<'tcx>,
682 param_env: ty::ParamEnv<'tcx>,
684 tcx_at.is_copy_raw(param_env.and(self))
687 /// Checks whether values of this type `T` have a size known at
688 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
689 /// for the purposes of this check, so it can be an
690 /// over-approximation in generic contexts, where one can have
691 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
692 /// actually carry lifetime requirements.
693 pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
694 self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
697 /// Checks whether values of this type `T` implement the `Freeze`
698 /// trait -- frozen types are those that do not contain a
699 /// `UnsafeCell` anywhere. This is a language concept used to
700 /// distinguish "true immutability", which is relevant to
701 /// optimization as well as the rules around static values. Note
702 /// that the `Freeze` trait is not exposed to end users and is
703 /// effectively an implementation detail.
704 // FIXME: use `TyCtxtAt` instead of separate `Span`.
705 pub fn is_freeze(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
706 self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
/// Fast path helper for testing if a type is `Freeze`.
///
/// Returning true means the type is known to be `Freeze`. Returning
/// `false` means nothing -- could be `Freeze`, might not be.
fn is_trivially_freeze(&self) -> bool {
    // Primitive/pointer-like kinds are `Freeze` by construction.
    | ty::FnPtr(_) => true,
    // Aggregates are trivially `Freeze` only if all parts are.
    ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_freeze),
    ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
    // Everything else (ADTs, generics, opaques, ...) needs the full
    // `is_freeze_raw` query to decide — conservatively `false` here.
    | ty::GeneratorWitness(_)
    | ty::Projection(_) => false,
744 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
745 /// non-copy and *might* have a destructor attached; if it returns
746 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
748 /// (Note that this implies that if `ty` has a destructor attached,
749 /// then `needs_drop` will definitely return `true` for `ty`.)
751 /// Note that this method is used to check eligible types in unions.
753 pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
754 // Avoid querying in simple cases.
755 match needs_drop_components(self, &tcx.data_layout) {
756 Err(AlwaysRequiresDrop) => true,
758 let query_ty = match *components {
760 // If we've got a single component, call the query with that
761 // to increase the chance that we hit the query cache.
762 [component_ty] => component_ty,
765 // This doesn't depend on regions, so try to minimize distinct
767 let erased = tcx.normalize_erasing_regions(param_env, query_ty);
768 tcx.needs_drop_raw(param_env.and(erased))
773 /// Returns `true` if equality for this type is both reflexive and structural.
775 /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
777 /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
778 /// types, equality for the type as a whole is structural when it is the same as equality
779 /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
780 /// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for
783 /// This function is "shallow" because it may return `true` for a composite type whose fields
784 /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
785 /// because equality for arrays is determined by the equality of each array element. If you
786 /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
787 /// down, you will need to use a type visitor.
789 pub fn is_structural_eq_shallow(&'tcx self, tcx: TyCtxt<'tcx>) -> bool {
791 // Look for an impl of both `PartialStructuralEq` and `StructuralEq`.
792 Adt(..) => tcx.has_structural_eq_impls(self),
794 // Primitive types that satisfy `Eq`.
795 Bool | Char | Int(_) | Uint(_) | Str | Never => true,
797 // Composite types that satisfy `Eq` when all of their fields do.
799 // Because this function is "shallow", we return `true` for these composites regardless
800 // of the type(s) contained within.
801 Ref(..) | Array(..) | Slice(_) | Tuple(..) => true,
803 // Raw pointers use bitwise comparison.
804 RawPtr(_) | FnPtr(_) => true,
806 // Floating point numbers are not `Eq`.
809 // Conservatively return `false` for all others...
811 // Anonymous function types
812 FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,
814 // Generic or inferred types
816 // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
817 // called for known, fully-monomorphized types.
818 Projection(_) | Opaque(..) | Param(_) | Bound(..) | Placeholder(_) | Infer(_) => false,
820 Foreign(_) | GeneratorWitness(..) | Error(_) => false,
824 pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
825 match (&a.kind(), &b.kind()) {
826 (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
831 substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
/// Check whether a type is representable. This means it cannot contain unboxed
/// structural recursion. This check is needed for structs and enums.
pub fn is_representable(&'tcx self, tcx: TyCtxt<'tcx>, sp: Span) -> Representability {
    // Iterate until something non-representable is found
    // Combines per-field verdicts, merging the spans of multiple
    // `SelfRecursive` results and otherwise keeping the "worst" one.
    fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
        iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
            (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
                Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
            (r1, r2) => cmp::max(r1, r2),

    // Checks the component types of `ty` (tuple fields, array element,
    // ADT fields) for structural recursion.
    fn are_inner_types_recursive<'tcx>(
        seen: &mut Vec<Ty<'tcx>>,
        representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
    ) -> Representability {
            // Find non representable
            fold_repr(ty.tuple_fields().map(|ty| {
                is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
            // Fixed-length vectors.
            // FIXME(#11924) Behavior undecided for zero-length vectors.
                is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
            Adt(def, substs) => {
                // Find non representable fields with their spans
                fold_repr(def.all_fields().map(|field| {
                    let ty = field.ty(tcx, substs);
                    // Point the diagnostic at the field's own type if the
                    // field is local; otherwise fall back to `sp`.
                    let span = match field
                        .map(|id| tcx.hir().local_def_id_to_hir_id(id))
                        .and_then(|id| tcx.hir().find(id))
                        Some(hir::Node::Field(field)) => field.ty.span,
                    match is_type_structurally_recursive(
                        Representability::SelfRecursive(_) => {
                            Representability::SelfRecursive(vec![span])
                // this check is run on type definitions, so we don't expect
                // to see closure types
                bug!("requires check invoked on inapplicable type: {:?}", ty)
            _ => Representability::Representable,

    // Whether `ty` is an application of exactly the ADT `def`
    // (generic arguments are deliberately ignored).
    fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
            Adt(ty_def, _) => ty_def == def,

    // Does the type `ty` directly (without indirection through a pointer)
    // contain any types on stack `seen`?
    fn is_type_structurally_recursive<'tcx>(
        seen: &mut Vec<Ty<'tcx>>,
        representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
    ) -> Representability {
        debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
        // Memoized wrapper around the `_inner` worker below.
        if let Some(representability) = representable_cache.get(ty) {
                "is_type_structurally_recursive: {:?} {:?} - (cached) {:?}",
                ty, sp, representability
            return representability.clone();
        let representability =
            is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
        representable_cache.insert(ty, representability.clone());

    fn is_type_structurally_recursive_inner<'tcx>(
        seen: &mut Vec<Ty<'tcx>>,
        representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
    ) -> Representability {
                // Iterate through stack of previously seen types.
                let mut iter = seen.iter();

                // The first item in `seen` is the type we are actually curious about.
                // We want to return SelfRecursive if this type contains itself.
                // It is important that we DON'T take generic parameters into account
                // for this check, so that Bar<T> in this example counts as SelfRecursive:
                //
                // struct Bar<T> { x: Bar<Foo> }
                if let Some(&seen_type) = iter.next() {
                    if same_struct_or_enum(seen_type, *def) {
                        debug!("SelfRecursive: {:?} contains {:?}", seen_type, ty);
                        return Representability::SelfRecursive(vec![sp]);

                // We also need to know whether the first item contains other types
                // that are structurally recursive. If we don't catch this case, we
                // will recurse infinitely for some inputs.
                //
                // It is important that we DO take generic parameters into account
                // here, so that code like this is considered SelfRecursive, not
                // ContainsRecursive:
                //
                // struct Foo { Option<Option<Foo>> }
                for &seen_type in iter {
                    if ty::TyS::same_type(ty, seen_type) {
                        debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty);
                        return Representability::ContainsRecursive;

                // For structs and enums, track all previously seen types by pushing them
                // onto the 'seen' stack.
                let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
                // No need to push in other cases.
                are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)

    debug!("is_type_representable: {:?}", self);

    // To avoid a stack overflow when checking an enum variant or struct that
    // contains a different, structurally recursive type, maintain a stack
    // of seen types and check recursion for each of them (issues #3008, #3779).
    let mut seen: Vec<Ty<'_>> = Vec::new();
    let mut representable_cache = FxHashMap::default();
    let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, self);
    debug!("is_type_representable: {:?} is {:?}", self, r);
1009 /// Peel off all reference types in this type until there are none left.
1011 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
1016 /// - `&'a mut u8` -> `u8`
1017 /// - `&'a &'b u8` -> `u8`
1018 /// - `&'a *const &'b u8 -> *const &'b u8`
1019 pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
// NOTE(review): the initialization of `ty` (presumably `let mut ty = self;`),
// the loop body that steps to `inner_ty`, and the trailing return are on
// elided lines — only the loop head is visible here. Note the loop strips
// `Ref` only; raw pointers are not peeled (matches the doc example above).
1021 while let Ref(_, inner_ty, _) = ty.kind() {
// Classification of an explicit `self: SomeType` receiver declaration; see
// `ExplicitSelf::determine` further down in this file for how values are
// produced.
1028 pub enum ExplicitSelf<'tcx> {
// NOTE(review): the remaining variants (`ByValue`, `ByBox`, `Other` — all
// referenced by `determine`'s match arms below) are on elided lines, as is
// the closing brace.
1030 ByReference(ty::Region<'tcx>, hir::Mutability),
1031 ByRawPointer(hir::Mutability),
1036 impl<'tcx> ExplicitSelf<'tcx> {
1037 /// Categorizes an explicit self declaration like `self: SomeType`
1038 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
1040 /// This is mainly used to require the arbitrary_self_types feature
1041 /// in the case of `Other`, to improve error messages in the common cases,
1042 /// and to make `Other` non-object-safe.
1047 /// impl<'a> Foo for &'a T {
1048 /// // Legal declarations:
1049 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
1050 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
1051 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
1052 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
1054 /// // Invalid cases will be caught by `check_method_receiver`:
1055 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
1056 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
1057 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
// `is_self_ty` is the caller-supplied predicate deciding whether a given type
// *is* the `Self` type; classification is purely syntactic on the outermost
// type constructor.
1061 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
1063 P: Fn(Ty<'tcx>) -> bool,
1065 use self::ExplicitSelf::*;
// Arm order matters: the `ByValue` guard runs first, so `self: Self` wins
// even when `Self` is itself a reference type (see `method2` in the doc
// example above).
1067 match *self_arg_ty.kind() {
1068 _ if is_self_ty(self_arg_ty) => ByValue,
1069 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
1070 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
1071 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
// NOTE(review): the fallback arm (presumably `_ => Other`, per the docs
// above) and the closing braces are on elided lines.
1077 /// Returns a list of types such that the given type needs drop if and only if
1078 /// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
1079 /// this type always needs drop.
1080 pub fn needs_drop_components(
// NOTE(review): the `ty: Ty<'tcx>` parameter line and the match scrutineer
// (presumably `match *ty.kind()`) are elided from this listing, along with a
// number of trivially-dropless arms (1087-1094, 1096-1097) and closing braces.
1082 target_layout: &TargetDataLayout,
1083 ) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
// Types with no drop glue at all: an empty component list.
1085 ty::Infer(ty::FreshIntTy(_))
1086 | ty::Infer(ty::FreshFloatTy(_))
1095 | ty::GeneratorWitness(..)
1098 | ty::Str => Ok(SmallVec::new()),
1100 // Foreign types can never have destructors.
1101 ty::Foreign(..) => Ok(SmallVec::new()),
// Conservative: a trait object may hide any destructor; an error type gives
// up rather than guess.
1103 ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
// A slice needs drop exactly when its element does.
1105 ty::Slice(ty) => needs_drop_components(ty, target_layout),
1106 ty::Array(elem_ty, size) => {
1107 match needs_drop_components(elem_ty, target_layout) {
1108 Ok(v) if v.is_empty() => Ok(v),
// The element needs drop (or is undecided); whether the array does depends on
// its length, evaluated here as a pointer-sized bit pattern.
1109 res => match size.val.try_to_bits(target_layout.pointer_size) {
1110 // Arrays of size zero don't need drop, even if their element
1112 Some(0) => Ok(SmallVec::new()),
// NOTE(review): the `Some(_) => res` arm (non-zero length: propagate the
// element's components) appears to be on the elided line 1113 — confirm.
1114 // We don't know which of the cases above we are in, so
1115 // return the whole type and let the caller decide what to
1117 None => Ok(smallvec![ty]),
1121 // If any field needs drop, then the whole tuple does.
// `try_fold` short-circuits: the first field that is `Err(AlwaysRequiresDrop)`
// aborts the accumulation via `?`.
1122 ty::Tuple(..) => ty.tuple_fields().try_fold(SmallVec::new(), move |mut acc, elem| {
1123 acc.extend(needs_drop_components(elem, target_layout)?);
1127 // These require checking for `Copy` bounds or `Adt` destructors.
// Undecidable here: return the type itself as its own single component.
1129 | ty::Projection(..)
1132 | ty::Placeholder(..)
1136 | ty::Generator(..) => Ok(smallvec![ty]),
1140 // Does the equivalent of
1142 // let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1143 // folder.tcx().intern_*(&v)
// ...but avoids allocating and re-interning when folding changes nothing.
// NOTE(review): the `folder: &mut F` parameter line (1147), the `where`
// keyword line, the closure-closing `}) {` line (1159), and the `else`
// branch that returns `list` unchanged are all elided from this listing.
1145 pub fn fold_list<'tcx, F, T>(
1146 list: &'tcx ty::List<T>,
1148 intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
1149 ) -> &'tcx ty::List<T>
1151 F: TypeFolder<'tcx>,
1152 T: TypeFoldable<'tcx> + PartialEq + Copy,
1154 let mut iter = list.iter();
1155 // Look for the first element that changed
// `by_ref()` lets the same iterator be resumed below for the tail, so each
// element is folded exactly once.
1156 if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| {
1157 let new_t = t.fold_with(folder);
1158 if new_t == t { None } else { Some((i, new_t)) }
1160 // An element changed, prepare to intern the resulting list
1161 let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
// Unchanged prefix is copied verbatim; the changed element and the folded
// remainder follow.
1162 new_list.extend_from_slice(&list[..i]);
1163 new_list.push(new_t);
1164 new_list.extend(iter.map(|t| t.fold_with(folder)));
1165 intern(folder.tcx(), &new_list)
// Marker error type returned by `needs_drop_components` (above) when a type
// unconditionally needs drop regardless of its components — e.g. the
// `ty::Dynamic(..)` arm.
1171 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
1172 pub struct AlwaysRequiresDrop;
1174 /// Normalizes all opaque types in the given value, replacing them
1175 /// with their underlying types.
1176 pub fn normalize_opaque_types(
// NOTE(review): the first parameter line (presumably `tcx: TyCtxt<'tcx>`,
// original line 1177) and the struct-literal tail (a `tcx` field plus `};`)
// are elided from this listing.
1178 val: &'tcx List<ty::Predicate<'tcx>>,
1179 ) -> &'tcx List<ty::Predicate<'tcx>> {
// Fresh expander state per call: no opaque types seen yet, empty expansion
// cache, and recursion checking disabled (`check_recursion: false`) since
// this pass only substitutes underlying types.
1180 let mut visitor = OpaqueTypeExpander {
1181 seen_opaque_tys: FxHashSet::default(),
1182 expanded_cache: FxHashMap::default(),
1183 primary_def_id: None,
1184 found_recursion: false,
1185 check_recursion: false,
1188 val.fold_with(&mut visitor)
// Registers this module's query implementations (currently just
// `normalize_opaque_types`) with the global providers table; all other
// providers are kept via the struct-update `..*providers`.
// NOTE(review): the closing brace falls past the end of this listing.
1191 pub fn provide(providers: &mut ty::query::Providers) {
1192 *providers = ty::query::Providers { normalize_opaque_types, ..*providers }