1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
3 use crate::ich::NodeIdHashingMode;
4 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
5 use crate::mir::interpret::{sign_extend, truncate};
6 use crate::ty::fold::TypeFolder;
7 use crate::ty::layout::IntegerExt;
8 use crate::ty::query::TyCtxtAt;
9 use crate::ty::subst::{GenericArgKind, InternalSubsts, Subst, SubstsRef};
10 use crate::ty::TyKind::*;
11 use crate::ty::{self, DefIdTree, GenericParamDefKind, List, Ty, TyCtxt, TypeFoldable};
12 use rustc_apfloat::Float as _;
14 use rustc_attr::{self as attr, SignedInt, UnsignedInt};
15 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
16 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
17 use rustc_errors::ErrorReported;
19 use rustc_hir::def::DefKind;
20 use rustc_hir::def_id::DefId;
21 use rustc_macros::HashStable;
23 use rustc_target::abi::{Integer, Size, TargetDataLayout};
24 use smallvec::SmallVec;
// NOTE(review): the field list is truncated in this excerpt; usage below
// (`self.val`, `self.ty`) shows the struct carries the raw bits and the type.
27 #[derive(Copy, Clone, Debug)]
28 pub struct Discr<'tcx> {
29 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
// Pretty-print a discriminant: signed integers are printed with their sign
// (after sign-extending the raw bits); everything else prints the raw value.
34 impl<'tcx> fmt::Display for Discr<'tcx> {
35 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
// Signed case: look up the integer's size via the thread-local `tcx`
// (no `TyCtxt` parameter is available in a `Display` impl).
38 let size = ty::tls::with(|tcx| Integer::from_attr(&tcx, SignedInt(ity)).size());
40 // sign extend the raw representation to be an i128
41 let x = sign_extend(x, size) as i128;
// Fallback: print the raw bit representation unchanged.
44 _ => write!(fmt, "{}", self.val),
/// Minimum value of a signed integer of width `size` (e.g., `-128` for 8 bits).
49 fn signed_min(size: Size) -> i128 {
// Only the sign bit set, then sign-extended up to the full `i128` width.
50 sign_extend(1_u128 << (size.bits() - 1), size) as i128
/// Maximum value of a signed integer of width `size` (e.g., `127` for 8 bits).
53 fn signed_max(size: Size) -> i128 {
// Shift `i128::MAX` right so only `size.bits() - 1` value bits remain set.
54 i128::MAX >> (128 - size.bits())
/// Maximum value of an unsigned integer of width `size` (e.g., `255` for 8 bits).
57 fn unsigned_max(size: Size) -> u128 {
58 u128::MAX >> (128 - size.bits())
/// Returns the layout size and signedness of an integer type.
/// ICEs (via `bug!`) if `ty` is not `Int`/`Uint` — callers use it for
/// discriminant types, which must be integers.
61 fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
62 let (int, signed) = match ty.kind {
63 Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true),
64 Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false),
65 _ => bug!("non integer discriminant"),
70 impl<'tcx> Discr<'tcx> {
71 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
72 pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
73 self.checked_add(tcx, 1).0
/// Adds `n` to the discriminant, wrapping around within the range of
/// `self.ty`. The returned `bool` is `true` iff wrap-around occurred.
75 pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
76 let (size, signed) = int_size_and_signed(tcx, self.ty);
77 let (val, oflo) = if signed {
78 let min = signed_min(size);
79 let max = signed_max(size);
80 let val = sign_extend(self.val, size) as i128;
// `n` must fit in i128 so the signed arithmetic below cannot itself overflow.
81 assert!(n < (i128::MAX as u128));
// NOTE(review): a line converting `n` to i128 is elided from this excerpt;
// the arithmetic below operates on `n` as a signed quantity.
83 let oflo = val > max - n;
// On wrap-around, continue counting from `min` with the leftover amount.
84 let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
85 // zero the upper bits
86 let val = val as u128;
87 let val = truncate(val, size);
// Unsigned case: same wrap-around scheme, counting from zero.
90 let max = unsigned_max(size);
92 let oflo = val > max - n;
93 let val = if oflo { n - (max - val) - 1 } else { val + n };
96 (Self { val, ty: self.ty }, oflo)
/// Extension trait giving `attr::IntType` (a discriminant representation
/// attribute) conversions to `Ty` and discriminant arithmetic.
100 pub trait IntTypeExt {
/// The `Ty` corresponding to this integer type.
101 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
/// The successor of `val`, or `None` on overflow; `Some(initial)` when `val` is `None`.
102 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
/// The first discriminant value (zero) for this integer type.
103 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
106 impl IntTypeExt for attr::IntType {
// Map each `ast` integer-type variant to the interned primitive `Ty`.
107 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
109 SignedInt(ast::IntTy::I8) => tcx.types.i8,
110 SignedInt(ast::IntTy::I16) => tcx.types.i16,
111 SignedInt(ast::IntTy::I32) => tcx.types.i32,
112 SignedInt(ast::IntTy::I64) => tcx.types.i64,
113 SignedInt(ast::IntTy::I128) => tcx.types.i128,
114 SignedInt(ast::IntTy::Isize) => tcx.types.isize,
115 UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
116 UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
117 UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
118 UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
119 UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
120 UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
// The first discriminant is always zero, typed at this integer type.
124 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
125 Discr { val: 0, ty: self.to_ty(tcx) }
// Successor of `val`: `None` on overflow so the caller can report an error;
// with no previous value, start at the initial (zero) discriminant.
128 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
129 if let Some(val) = val {
// The previous discriminant must already have this representation type.
130 assert_eq!(self.to_ty(tcx), val.ty);
131 let (new, oflo) = val.checked_add(tcx, 1);
132 if oflo { None } else { Some(new) }
134 Some(self.initial_discriminant(tcx))
139 /// Describes whether a type is representable. For types that are not
140 /// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
141 /// distinguish between types that are recursive with themselves and types that
142 /// contain a different recursive type. These cases can therefore be treated
143 /// differently when reporting errors.
145 /// The ordering of the cases is significant. They are sorted so that cmp::max
146 /// will keep the "more erroneous" of two values.
// NOTE(review): `Ord`/`PartialOrd` derive declaration order as severity order;
// some variants are elided from this excerpt.
147 #[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
148 pub enum Representability {
/// The type is directly recursive with itself; spans point at the offending fields.
151 SelfRecursive(Vec<Span>),
154 impl<'tcx> TyCtxt<'tcx> {
155 /// Creates a hash of the type `Ty` which will be the same no matter what crate
156 /// context it's calculated within. This is used by the `type_id` intrinsic.
157 pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
158 let mut hasher = StableHasher::new();
159 let mut hcx = self.create_stable_hashing_context();
161 // We want the type_id be independent of the types free regions, so we
162 // erase them. The erase_regions() call will also anonymize bound
163 // regions, which is desirable too.
164 let ty = self.erase_regions(&ty);
// Spans are disabled and node ids hash as def-paths so the result is
// stable across crates and compilation sessions.
166 hcx.while_hashing_spans(false, |hcx| {
167 hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
168 ty.hash_stable(hcx, &mut hasher);
175 impl<'tcx> TyCtxt<'tcx> {
/// Returns `true` when `ty` is an ADT one of whose fields has the `Error` type.
176 pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
177 if let ty::Adt(def, substs) = ty.kind {
178 for field in def.all_fields() {
179 let field_ty = field.ty(self, substs);
180 if let Error(_) = field_ty.kind {
188 /// Attempts to returns the deeply last field of nested structures, but
189 /// does not apply any normalization in its search. Returns the same type
190 /// if input `ty` is not a structure at all.
191 pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
// Identity closure = "no normalization".
193 tcx.struct_tail_with_normalize(ty, |ty| ty)
196 /// Returns the deeply last field of nested structures, or the same type if
197 /// not a structure at all. Corresponds to the only possible unsized field,
198 /// and its type can be used to determine unsizing strategy.
200 /// Should only be called if `ty` has no inference variables and does not
201 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
202 /// normalization attempt may cause compiler bugs.
203 pub fn struct_tail_erasing_lifetimes(
206 param_env: ty::ParamEnv<'tcx>,
209 tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
212 /// Returns the deeply last field of nested structures, or the same type if
213 /// not a structure at all. Corresponds to the only possible unsized field,
214 /// and its type can be used to determine unsizing strategy.
216 /// This is parameterized over the normalization strategy (i.e. how to
217 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
218 /// function to indicate no normalization should take place.
220 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
222 pub fn struct_tail_with_normalize(
225 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
// Iteratively descend into the last field of structs/tuples,
// normalizing projections/opaques when that makes progress.
229 ty::Adt(def, substs) => {
230 if !def.is_struct() {
233 match def.non_enum_variant().fields.last() {
234 Some(f) => ty = f.ty(self, substs),
240 if let Some((&last_ty, _)) = tys.split_last() {
241 ty = last_ty.expect_ty();
247 ty::Projection(_) | ty::Opaque(..) => {
248 let normalized = normalize(ty);
// If normalization is a no-op we cannot make further progress.
249 if ty == normalized {
264 /// Same as applying `struct_tail` on `source` and `target`, but only
265 /// keeps going as long as the two types are instances of the same
266 /// structure definitions.
267 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
268 /// whereas struct_tail produces `T`, and `Trait`, respectively.
270 /// Should only be called if the types have no inference variables and do
271 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
272 /// normalization attempt may cause compiler bugs.
273 pub fn struct_lockstep_tails_erasing_lifetimes(
277 param_env: ty::ParamEnv<'tcx>,
278 ) -> (Ty<'tcx>, Ty<'tcx>) {
280 tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
281 tcx.normalize_erasing_regions(param_env, ty)
285 /// Same as applying `struct_tail` on `source` and `target`, but only
286 /// keeps going as long as the two types are instances of the same
287 /// structure definitions.
288 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
289 /// whereas struct_tail produces `T`, and `Trait`, respectively.
291 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
293 pub fn struct_lockstep_tails_with_normalize(
297 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
298 ) -> (Ty<'tcx>, Ty<'tcx>) {
299 let (mut a, mut b) = (source, target);
// Descend into both types in lockstep while they share the same
// struct definition (or same-arity tuple shape).
301 match (&a.kind, &b.kind) {
302 (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
303 if a_def == b_def && a_def.is_struct() =>
305 if let Some(f) = a_def.non_enum_variant().fields.last() {
306 a = f.ty(self, a_substs);
307 b = f.ty(self, b_substs);
312 (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
313 if let Some(a_last) = a_tys.last() {
314 a = a_last.expect_ty();
315 b = b_tys.last().unwrap().expect_ty();
320 (ty::Projection(_) | ty::Opaque(..), _)
321 | (_, ty::Projection(_) | ty::Opaque(..)) => {
322 // If either side is a projection, attempt to
323 // progress via normalization. (Should be safe to
324 // apply to both sides as normalization is
326 let a_norm = normalize(a);
327 let b_norm = normalize(b);
328 if a == a_norm && b == b_norm {
342 /// Calculate the destructor of a given type.
343 pub fn calculate_dtor(
346 validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported>,
347 ) -> Option<ty::Destructor> {
// Bail out early if the `Drop` lang item is absent (e.g. `#![no_core]`).
348 let drop_trait = self.lang_items().drop_trait()?;
349 self.ensure().coherent_trait(drop_trait);
351 let mut dtor_did = None;
352 let ty = self.type_of(adt_did);
// Take the first associated item of a relevant, validated `Drop` impl
// as the destructor function.
353 self.for_each_relevant_impl(drop_trait, ty, |impl_did| {
354 if let Some(item) = self.associated_items(impl_did).in_definition_order().next() {
355 if validate(self, impl_did).is_ok() {
356 dtor_did = Some(item.def_id);
361 Some(ty::Destructor { did: dtor_did? })
364 /// Returns the set of types that are required to be alive in
365 /// order to run the destructor of `def` (see RFCs 769 and
368 /// Note that this returns only the constraints for the
369 /// destructor of `def` itself. For the destructors of the
370 /// contents, you need `adt_dtorck_constraint`.
371 pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
372 let dtor = match def.destructor(self) {
374 debug!("destructor_constraints({:?}) - no dtor", def.did);
377 Some(dtor) => dtor.did,
380 let impl_def_id = self.associated_item(dtor).container.id();
381 let impl_generics = self.generics_of(impl_def_id);
383 // We have a destructor - all the parameters that are not
384 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
387 // We need to return the list of parameters from the ADTs
388 // generics/substs that correspond to impure parameters on the
389 // impl's generics. This is a bit ugly, but conceptually simple:
391 // Suppose our ADT looks like the following
393 // struct S<X, Y, Z>(X, Y, Z);
397 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
399 // We want to return the parameters (X, Y). For that, we match
400 // up the item-substs <X, Y, Z> with the substs on the impl ADT,
401 // <P1, P2, P0>, and then look up which of the impl substs refer to
402 // parameters marked as pure.
404 let impl_substs = match self.type_of(impl_def_id).kind {
405 ty::Adt(def_, substs) if def_ == def => substs,
409 let item_substs = match self.type_of(def.did).kind {
410 ty::Adt(def_, substs) if def_ == def => substs,
// Keep only the item params whose corresponding impl param is NOT
// `pure_wrt_drop` (i.e. lacks `#[may_dangle]`).
414 let result = item_substs
416 .zip(impl_substs.iter())
419 GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
420 !impl_generics.region_param(ebr, self).pure_wrt_drop
422 GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
423 !impl_generics.type_param(pt, self).pure_wrt_drop
425 GenericArgKind::Const(&ty::Const {
426 val: ty::ConstKind::Param(ref pc), ..
427 }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
428 GenericArgKind::Lifetime(_)
429 | GenericArgKind::Type(_)
430 | GenericArgKind::Const(_) => {
431 // Not a type, const or region param: this should be reported
437 .map(|(item_param, _)| item_param)
439 debug!("destructor_constraint({:?}) = {:?}", def.did, result);
443 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
444 /// that closures have a `DefId`, but the closure *expression* also
445 /// has a `HirId` that is located within the context where the
446 /// closure appears (and, sadly, a corresponding `NodeId`, since
447 /// those are not yet phased out). The parent of the closure's
448 /// `DefId` will also be the context where it appears.
449 pub fn is_closure(self, def_id: DefId) -> bool {
450 matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
453 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
454 pub fn is_trait(self, def_id: DefId) -> bool {
455 self.def_kind(def_id) == DefKind::Trait
458 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
459 /// and `false` otherwise.
460 pub fn is_trait_alias(self, def_id: DefId) -> bool {
461 self.def_kind(def_id) == DefKind::TraitAlias
464 /// Returns `true` if this `DefId` refers to the implicit constructor for
465 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
466 pub fn is_constructor(self, def_id: DefId) -> bool {
467 matches!(self.def_kind(def_id), DefKind::Ctor(..))
470 /// Given the def-ID of a fn or closure, returns the def-ID of
471 /// the innermost fn item that the closure is contained within.
472 /// This is a significant `DefId` because, when we do
473 /// type-checking, we type-check this fn item and all of its
474 /// (transitive) closures together. Therefore, when we fetch the
475 /// `typeck` the closure, for example, we really wind up
476 /// fetching the `typeck` the enclosing fn item.
477 pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
478 let mut def_id = def_id;
// Walk up the def-tree until we leave all (possibly nested) closures.
479 while self.is_closure(def_id) {
480 def_id = self.parent(def_id).unwrap_or_else(|| {
481 bug!("closure {:?} has no parent", def_id);
487 /// Given the `DefId` and substs a closure, creates the type of
488 /// `self` argument that the closure expects. For example, for a
489 /// `Fn` closure, this would return a reference type `&T` where
490 /// `T = closure_ty`.
492 /// Returns `None` if this closure's kind has not yet been inferred.
493 /// This should only be possible during type checking.
495 /// Note that the return value is a late-bound region and hence
496 /// wrapped in a binder.
497 pub fn closure_env_ty(
499 closure_def_id: DefId,
500 closure_substs: SubstsRef<'tcx>,
501 ) -> Option<ty::Binder<Ty<'tcx>>> {
502 let closure_ty = self.mk_closure(closure_def_id, closure_substs);
503 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
504 let closure_kind_ty = closure_substs.as_closure().kind_ty();
// `?` here: kind not yet inferred -> `None`.
505 let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
// `Fn` -> `&T`, `FnMut` -> `&mut T`, `FnOnce` -> `T` (by value).
506 let env_ty = match closure_kind {
507 ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
508 ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
509 ty::ClosureKind::FnOnce => closure_ty,
511 Some(ty::Binder::bind(env_ty))
514 /// Given the `DefId` of some item that has no type or const parameters, make
515 /// a suitable "empty substs" for it.
516 pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> {
517 InternalSubsts::for_item(self, item_def_id, |param, _| match param.kind {
// Lifetimes may exist; they are all erased.
518 GenericParamDefKind::Lifetime => self.lifetimes.re_erased.into(),
519 GenericParamDefKind::Type { .. } => {
520 bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
522 GenericParamDefKind::Const { .. } => {
523 bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id)
528 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
529 pub fn is_static(&self, def_id: DefId) -> bool {
530 self.static_mutability(def_id).is_some()
533 /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
534 pub fn is_thread_local_static(&self, def_id: DefId) -> bool {
535 self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
538 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
539 pub fn is_mutable_static(&self, def_id: DefId) -> bool {
540 self.static_mutability(def_id) == Some(hir::Mutability::Mut)
543 /// Get the type of the pointer to the static that we use in MIR.
544 pub fn static_ptr_ty(&self, def_id: DefId) -> Ty<'tcx> {
545 // Make sure that any constants in the static's type are evaluated.
546 let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
// Mutable statics are `*mut T`; immutable ones are `&'erased T`.
548 if self.is_mutable_static(def_id) {
549 self.mk_mut_ptr(static_ty)
551 self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
555 /// Expands the given impl trait type, stopping if the type is recursive.
556 pub fn try_expand_impl_trait_type(
559 substs: SubstsRef<'tcx>,
560 ) -> Result<Ty<'tcx>, Ty<'tcx>> {
561 let mut visitor = OpaqueTypeExpander {
562 seen_opaque_tys: FxHashSet::default(),
563 expanded_cache: FxHashMap::default(),
564 primary_def_id: Some(def_id),
565 found_recursion: false,
566 check_recursion: true,
// `Err` carries the (partially) expanded type when recursion was detected.
570 let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
571 if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
/// Type folder that replaces `impl Trait` (opaque) types with their
/// underlying concrete types, detecting recursive expansion.
// NOTE(review): some fields (e.g. the `tcx` handle used by the impls below)
// are elided from this excerpt.
575 struct OpaqueTypeExpander<'tcx> {
576 // Contains the DefIds of the opaque types that are currently being
577 // expanded. When we expand an opaque type we insert the DefId of
578 // that type, and when we finish expanding that type we remove the
580 seen_opaque_tys: FxHashSet<DefId>,
581 // Cache of all expansions we've seen so far. This is a critical
582 // optimization for some large types produced by async fn trees.
583 expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
// The opaque type whose recursion we are checking for, if any.
584 primary_def_id: Option<DefId>,
// Set once recursion involving `primary_def_id` has been detected.
585 found_recursion: bool,
586 /// Whether or not to check for recursive opaque types.
587 /// This is `true` when we're explicitly checking for opaque type
588 /// recursion, and 'false' otherwise to avoid unnecessary work.
589 check_recursion: bool,
593 impl<'tcx> OpaqueTypeExpander<'tcx> {
/// Expands one opaque type, consulting/filling the expansion cache.
/// Returns `None` once recursion has been detected.
594 fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
595 if self.found_recursion {
// Fold the substs first so cache keys are fully expanded.
598 let substs = substs.fold_with(self);
// `insert` returning `false` means `def_id` is already being expanded,
// i.e. we have hit a recursive opaque type.
599 if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
600 let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
601 Some(expanded_ty) => expanded_ty,
602 None => {
                // Cache miss: substitute and recursively fold the
                // underlying type, then memoize the result.
603 let generic_ty = self.tcx.type_of(def_id);
604 let concrete_ty = generic_ty.subst(self.tcx, substs);
605 let expanded_ty = self.fold_ty(concrete_ty);
606 self.expanded_cache.insert((def_id, substs), expanded_ty);
// Done expanding this def-id; pop it from the in-progress set.
610 if self.check_recursion {
611 self.seen_opaque_tys.remove(&def_id);
615 // If another opaque type that we contain is recursive, then it
616 // will report the error, so we don't have to.
617 self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
623 impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
624 fn tcx(&self) -> TyCtxt<'tcx> {
// Expand opaque types in place; otherwise only recurse when the type
// can actually contain opaques (cheap flag check avoids useless walks).
628 fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
629 if let ty::Opaque(def_id, substs) = t.kind {
630 self.expand_opaque_ty(def_id, substs).unwrap_or(t)
631 } else if t.has_opaque_types() {
632 t.super_fold_with(self)
639 impl<'tcx> ty::TyS<'tcx> {
640 /// Returns the maximum value for the given numeric type (including `char`s)
641 /// or returns `None` if the type is not numeric.
642 pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
643 let val = match self.kind {
644 ty::Int(_) | ty::Uint(_) => {
645 let (size, signed) = int_size_and_signed(tcx, self);
646 let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
649 ty::Char => Some(std::char::MAX as u128),
// Floats: "max" is positive infinity, encoded as raw bits.
650 ty::Float(fty) => Some(match fty {
651 ast::FloatTy::F32 => ::rustc_apfloat::ieee::Single::INFINITY.to_bits(),
652 ast::FloatTy::F64 => ::rustc_apfloat::ieee::Double::INFINITY.to_bits(),
656 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
659 /// Returns the minimum value for the given numeric type (including `char`s)
660 /// or returns `None` if the type is not numeric.
661 pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
662 let val = match self.kind {
663 ty::Int(_) | ty::Uint(_) => {
664 let (size, signed) = int_size_and_signed(tcx, self);
// Signed min is truncated back to the type's width; unsigned min is 0.
665 let val = if signed { truncate(signed_min(size) as u128, size) } else { 0 };
// Floats: "min" is negative infinity, encoded as raw bits.
669 ty::Float(fty) => Some(match fty {
670 ast::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
671 ast::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
675 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
678 /// Checks whether values of this type `T` are *moved* or *copied*
679 /// when referenced -- this amounts to a check for whether `T:
680 /// Copy`, but note that we **don't** consider lifetimes when
681 /// doing this check. This means that we may generate MIR which
682 /// does copies even when the type actually doesn't satisfy the
683 /// full requirements for the `Copy` trait (cc #29149) -- this
684 /// winds up being reported as an error during NLL borrow check.
685 pub fn is_copy_modulo_regions(
687 tcx_at: TyCtxtAt<'tcx>,
688 param_env: ty::ParamEnv<'tcx>,
690 tcx_at.is_copy_raw(param_env.and(self))
693 /// Checks whether values of this type `T` have a size known at
694 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
695 /// for the purposes of this check, so it can be an
696 /// over-approximation in generic contexts, where one can have
697 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
698 /// actually carry lifetime requirements.
699 pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
// Fast syntactic check first; only fall back to the trait-system query.
700 self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
703 /// Checks whether values of this type `T` implement the `Freeze`
704 /// trait -- frozen types are those that do not contain a
705 /// `UnsafeCell` anywhere. This is a language concept used to
706 /// distinguish "true immutability", which is relevant to
707 /// optimization as well as the rules around static values. Note
708 /// that the `Freeze` trait is not exposed to end users and is
709 /// effectively an implementation detail.
710 // FIXME: use `TyCtxtAt` instead of separate `Span`.
711 pub fn is_freeze(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
712 self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
715 /// Fast path helper for testing if a type is `Freeze`.
717 /// Returning true means the type is known to be `Freeze`. Returning
718 /// `false` means nothing -- could be `Freeze`, might not be.
719 fn is_trivially_freeze(&self) -> bool {
// NOTE(review): most of this match's arms are elided from this excerpt.
732 | ty::FnPtr(_) => true,
733 ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_freeze),
734 ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
741 | ty::GeneratorWitness(_)
746 | ty::Projection(_) => false,
750 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
751 /// non-copy and *might* have a destructor attached; if it returns
752 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
754 /// (Note that this implies that if `ty` has a destructor attached,
755 /// then `needs_drop` will definitely return `true` for `ty`.)
757 /// Note that this method is used to check eligible types in unions.
759 pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
760 // Avoid querying in simple cases.
761 match needs_drop_components(self, &tcx.data_layout) {
762 Err(AlwaysRequiresDrop) => true,
764 let query_ty = match *components {
766 // If we've got a single component, call the query with that
767 // to increase the chance that we hit the query cache.
768 [component_ty] => component_ty,
771 // This doesn't depend on regions, so try to minimize distinct
773 let erased = tcx.normalize_erasing_regions(param_env, query_ty);
774 tcx.needs_drop_raw(param_env.and(erased))
779 /// Returns `true` if equality for this type is both reflexive and structural.
781 /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
783 /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
784 /// types, equality for the type as a whole is structural when it is the same as equality
785 /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
786 /// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for
789 /// This function is "shallow" because it may return `true` for a composite type whose fields
790 /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
791 /// because equality for arrays is determined by the equality of each array element. If you
792 /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
793 /// down, you will need to use a type visitor.
795 pub fn is_structural_eq_shallow(&'tcx self, tcx: TyCtxt<'tcx>) -> bool {
797 // Look for an impl of both `PartialStructuralEq` and `StructuralEq`.
798 Adt(..) => tcx.has_structural_eq_impls(self),
800 // Primitive types that satisfy `Eq`.
801 Bool | Char | Int(_) | Uint(_) | Str | Never => true,
803 // Composite types that satisfy `Eq` when all of their fields do.
805 // Because this function is "shallow", we return `true` for these composites regardless
806 // of the type(s) contained within.
807 Ref(..) | Array(..) | Slice(_) | Tuple(..) => true,
809 // Raw pointers use bitwise comparison.
810 RawPtr(_) | FnPtr(_) => true,
812 // Floating point numbers are not `Eq`.
815 // Conservatively return `false` for all others...
817 // Anonymous function types
818 FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,
820 // Generic or inferred types
822 // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
823 // called for known, fully-monomorphized types.
824 Projection(_) | Opaque(..) | Param(_) | Bound(..) | Placeholder(_) | Infer(_) => false,
826 Foreign(_) | GeneratorWitness(..) | Error(_) => false,
/// Structural "same type" comparison used by the representability check:
/// two ADTs of the same def whose type arguments are recursively `same_type`.
830 pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
831 match (&a.kind, &b.kind) {
832 (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
837 substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
843 /// Check whether a type is representable. This means it cannot contain unboxed
844 /// structural recursion. This check is needed for structs and enums.
845 pub fn is_representable(&'tcx self, tcx: TyCtxt<'tcx>, sp: Span) -> Representability {
846 // Iterate until something non-representable is found
847 fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
848 iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
// Two self-recursive results merge their span lists.
849 (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
850 Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
// Otherwise `Ord` picks the "more erroneous" variant (see enum docs).
852 (r1, r2) => cmp::max(r1, r2),
// Check the component types of `ty` (tuple fields, array elements,
// ADT fields) for structural recursion.
856 fn are_inner_types_recursive<'tcx>(
859 seen: &mut Vec<Ty<'tcx>>,
860 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
862 ) -> Representability {
865 // Find non representable
866 fold_repr(ty.tuple_fields().map(|ty| {
867 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
870 // Fixed-length vectors.
871 // FIXME(#11924) Behavior undecided for zero-length vectors.
873 is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
875 Adt(def, substs) => {
876 // Find non representable fields with their spans
877 fold_repr(def.all_fields().map(|field| {
878 let ty = field.ty(tcx, substs);
// Prefer the field's own HIR span for diagnostics when available.
879 let span = match field
882 .map(|id| tcx.hir().as_local_hir_id(id))
883 .and_then(|id| tcx.hir().find(id))
885 Some(hir::Node::Field(field)) => field.ty.span,
888 match is_type_structurally_recursive(
// Re-point the recursion error at this field's span.
895 Representability::SelfRecursive(_) => {
896 Representability::SelfRecursive(vec![span])
903 // this check is run on type definitions, so we don't expect
904 // to see closure types
905 bug!("requires check invoked on inapplicable type: {:?}", ty)
907 _ => Representability::Representable,
// `true` iff `ty` is an ADT with exactly the definition `def`.
911 fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
913 Adt(ty_def, _) => ty_def == def,
918 // Does the type `ty` directly (without indirection through a pointer)
919 // contain any types on stack `seen`?
920 fn is_type_structurally_recursive<'tcx>(
923 seen: &mut Vec<Ty<'tcx>>,
924 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
926 ) -> Representability {
927 debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
// Memoized wrapper around the `_inner` worker below.
928 if let Some(representability) = representable_cache.get(ty) {
930 "is_type_structurally_recursive: {:?} {:?} - (cached) {:?}",
931 ty, sp, representability
933 return representability.clone();
936 let representability =
937 is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
939 representable_cache.insert(ty, representability.clone());
943 fn is_type_structurally_recursive_inner<'tcx>(
946 seen: &mut Vec<Ty<'tcx>>,
947 representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
949 ) -> Representability {
953 // Iterate through stack of previously seen types.
954 let mut iter = seen.iter();
956 // The first item in `seen` is the type we are actually curious about.
957 // We want to return SelfRecursive if this type contains itself.
958 // It is important that we DON'T take generic parameters into account
959 // for this check, so that Bar<T> in this example counts as SelfRecursive:
962 // struct Bar<T> { x: Bar<Foo> }
964 if let Some(&seen_type) = iter.next() {
965 if same_struct_or_enum(seen_type, def) {
966 debug!("SelfRecursive: {:?} contains {:?}", seen_type, ty);
967 return Representability::SelfRecursive(vec![sp]);
971 // We also need to know whether the first item contains other types
972 // that are structurally recursive. If we don't catch this case, we
973 // will recurse infinitely for some inputs.
975 // It is important that we DO take generic parameters into account
976 // here, so that code like this is considered SelfRecursive, not
977 // ContainsRecursive:
979 // struct Foo { Option<Option<Foo>> }
981 for &seen_type in iter {
982 if ty::TyS::same_type(ty, seen_type) {
983 debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty);
984 return Representability::ContainsRecursive;
989 // For structs and enums, track all previously seen types by pushing them
990 // onto the 'seen' stack.
992 let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
997 // No need to push in other cases.
998 are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)
1003 debug!("is_type_representable: {:?}", self);
1005 // To avoid a stack overflow when checking an enum variant or struct that
1006 // contains a different, structurally recursive type, maintain a stack
1007 // of seen types and check recursion for each of them (issues #3008, #3779).
1008 let mut seen: Vec<Ty<'_>> = Vec::new();
1009 let mut representable_cache = FxHashMap::default();
1010 let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, self);
1011 debug!("is_type_representable: {:?} is {:?}", self, r);
1015 /// Peel off all reference types in this type until there are none left.
1017 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
1022 /// - `&'a mut u8` -> `u8`
1023 /// - `&'a &'b u8` -> `u8`
1024 /// - `&'a *const &'b u8 -> *const &'b u8`
1025 pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
1027 while let Ref(_, inner_ty, _) = ty.kind {
1034 pub enum ExplicitSelf<'tcx> {
1036 ByReference(ty::Region<'tcx>, hir::Mutability),
1037 ByRawPointer(hir::Mutability),
1042 impl<'tcx> ExplicitSelf<'tcx> {
1043 /// Categorizes an explicit self declaration like `self: SomeType`
1044 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
1046 /// This is mainly used to require the arbitrary_self_types feature
1047 /// in the case of `Other`, to improve error messages in the common cases,
1048 /// and to make `Other` non-object-safe.
1053 /// impl<'a> Foo for &'a T {
1054 /// // Legal declarations:
1055 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
1056 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
1057 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
1058 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
1060 /// // Invalid cases will be caught by `check_method_receiver`:
1061 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
1062 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
1063 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
1067 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
1069 P: Fn(Ty<'tcx>) -> bool,
1071 use self::ExplicitSelf::*;
1073 match self_arg_ty.kind {
1074 _ if is_self_ty(self_arg_ty) => ByValue,
1075 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
1076 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
1077 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
1083 /// Returns a list of types such that the given type needs drop if and only if
1084 /// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
1085 /// this type always needs drop.
// NOTE(review): this fragment is missing interior lines — at minimum the `ty`
// parameter (the recursive calls below pass a first argument) and the opening
// `match ty.kind {` of the big match. Confirm against the full file.
1086 pub fn needs_drop_components(
1088 target_layout: &TargetDataLayout,
1089 ) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
// Trivially drop-free kinds: fresh inference vars, generator witnesses, `str`,
// and (presumably, in the elided lines) the scalar/primitive kinds.
1091 ty::Infer(ty::FreshIntTy(_))
1092 | ty::Infer(ty::FreshFloatTy(_))
1101 | ty::GeneratorWitness(..)
1104 | ty::Str => Ok(SmallVec::new()),
1106 // Foreign types can never have destructors.
1107 ty::Foreign(..) => Ok(SmallVec::new()),
// Trait objects and error types are conservatively treated as always dropping.
1109 ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
// A slice needs drop exactly when its element type does.
1111 ty::Slice(ty) => needs_drop_components(ty, target_layout),
1112 ty::Array(elem_ty, size) => {
1113 match needs_drop_components(elem_ty, target_layout) {
// Element never needs drop -> neither does the array.
1114 Ok(v) if v.is_empty() => Ok(v),
// Otherwise the answer depends on the (const-evaluated) array length.
1115 res => match size.val.try_to_bits(target_layout.pointer_size) {
1116 // Arrays of size zero don't need drop, even if their element
1118 Some(0) => Ok(SmallVec::new()),
1120 // We don't know which of the cases above we are in, so
1121 // return the whole type and let the caller decide what to
1123 None => Ok(smallvec![ty]),
1127 // If any field needs drop, then the whole tuple does.
1128 ty::Tuple(..) => ty.tuple_fields().try_fold(SmallVec::new(), move |mut acc, elem| {
1129 acc.extend(needs_drop_components(elem, target_layout)?);
1133 // These require checking for `Copy` bounds or `Adt` destructors.
1135 | ty::Projection(..)
1138 | ty::Placeholder(..)
1142 | ty::Generator(..) => Ok(smallvec![ty]),
/// Zero-sized marker error returned by `needs_drop_components` (above) when a
/// type unconditionally requires drop (e.g. the `Dynamic`/`Error` arm).
1146 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
1147 pub struct AlwaysRequiresDrop;
1149 /// Normalizes all opaque types in the given value, replacing them
1150 /// with their underlying types.
1151 pub fn normalize_opaque_types(
1153 val: &'tcx List<ty::Predicate<'tcx>>,
1154 ) -> &'tcx List<ty::Predicate<'tcx>> {
1155 let mut visitor = OpaqueTypeExpander {
1156 seen_opaque_tys: FxHashSet::default(),
1157 expanded_cache: FxHashMap::default(),
1158 primary_def_id: None,
1159 found_recursion: false,
1160 check_recursion: false,
1163 val.fold_with(&mut visitor)
1166 pub fn provide(providers: &mut ty::query::Providers) {
1167 *providers = ty::query::Providers { normalize_opaque_types, ..*providers }