//! Miscellaneous type-system utilities that are too small to deserve their own modules.

use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir;
use crate::ty::layout::IntegerExt;
use crate::ty::query::TyCtxtAt;
use crate::ty::{
    self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
};
use crate::ty::{GenericArgKind, SubstsRef};
use rustc_apfloat::Float as _;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::GrowableBitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable;
use rustc_span::{sym, DUMMY_SP};
use rustc_target::abi::{Integer, IntegerType, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;

use std::{fmt, iter};
28 #[derive(Copy, Clone, Debug)]
29 pub struct Discr<'tcx> {
30 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
35 /// Used as an input to [`TyCtxt::uses_unique_generic_params`].
36 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
37 pub enum IgnoreRegions {
42 #[derive(Copy, Clone, Debug)]
43 pub enum NotUniqueParam<'tcx> {
44 DuplicateParam(ty::GenericArg<'tcx>),
45 NotParam(ty::GenericArg<'tcx>),
48 impl<'tcx> fmt::Display for Discr<'tcx> {
49 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
50 match *self.ty.kind() {
52 let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
54 // sign extend the raw representation to be an i128
55 let x = size.sign_extend(x) as i128;
58 _ => write!(fmt, "{}", self.val),
63 fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
64 let (int, signed) = match *ty.kind() {
65 ty::Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
66 ty::Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
67 _ => bug!("non integer discriminant"),
72 impl<'tcx> Discr<'tcx> {
73 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
74 pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
75 self.checked_add(tcx, 1).0
77 pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
78 let (size, signed) = int_size_and_signed(tcx, self.ty);
79 let (val, oflo) = if signed {
80 let min = size.signed_int_min();
81 let max = size.signed_int_max();
82 let val = size.sign_extend(self.val) as i128;
83 assert!(n < (i128::MAX as u128));
85 let oflo = val > max - n;
86 let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
87 // zero the upper bits
88 let val = val as u128;
89 let val = size.truncate(val);
92 let max = size.unsigned_int_max();
94 let oflo = val > max - n;
95 let val = if oflo { n - (max - val) - 1 } else { val + n };
98 (Self { val, ty: self.ty }, oflo)
102 pub trait IntTypeExt {
103 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
104 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
105 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
108 impl IntTypeExt for IntegerType {
109 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
111 IntegerType::Pointer(true) => tcx.types.isize,
112 IntegerType::Pointer(false) => tcx.types.usize,
113 IntegerType::Fixed(i, s) => i.to_ty(tcx, *s),
117 fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
118 Discr { val: 0, ty: self.to_ty(tcx) }
121 fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
122 if let Some(val) = val {
123 assert_eq!(self.to_ty(tcx), val.ty);
124 let (new, oflo) = val.checked_add(tcx, 1);
125 if oflo { None } else { Some(new) }
127 Some(self.initial_discriminant(tcx))
132 impl<'tcx> TyCtxt<'tcx> {
133 /// Creates a hash of the type `Ty` which will be the same no matter what crate
134 /// context it's calculated within. This is used by the `type_id` intrinsic.
135 pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
136 // We want the type_id be independent of the types free regions, so we
137 // erase them. The erase_regions() call will also anonymize bound
138 // regions, which is desirable too.
139 let ty = self.erase_regions(ty);
141 self.with_stable_hashing_context(|mut hcx| {
142 let mut hasher = StableHasher::new();
143 hcx.while_hashing_spans(false, |hcx| ty.hash_stable(hcx, &mut hasher));
148 pub fn res_generics_def_id(self, res: Res) -> Option<DefId> {
150 Res::Def(DefKind::Ctor(CtorOf::Variant, _), def_id) => {
151 Some(self.parent(self.parent(def_id)))
153 Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Struct, _), def_id) => {
154 Some(self.parent(def_id))
156 // Other `DefKind`s don't have generics and would ICE when calling
166 | DefKind::TraitAlias
170 | DefKind::AssocConst
179 pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
180 if let ty::Adt(def, substs) = *ty.kind() {
181 for field in def.all_fields() {
182 let field_ty = field.ty(self, substs);
183 if let ty::Error(_) = field_ty.kind() {
191 /// Attempts to returns the deeply last field of nested structures, but
192 /// does not apply any normalization in its search. Returns the same type
193 /// if input `ty` is not a structure at all.
194 pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
196 tcx.struct_tail_with_normalize(ty, |ty| ty, || {})
199 /// Returns the deeply last field of nested structures, or the same type if
200 /// not a structure at all. Corresponds to the only possible unsized field,
201 /// and its type can be used to determine unsizing strategy.
203 /// Should only be called if `ty` has no inference variables and does not
204 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
205 /// normalization attempt may cause compiler bugs.
206 pub fn struct_tail_erasing_lifetimes(
209 param_env: ty::ParamEnv<'tcx>,
212 tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty), || {})
215 /// Returns the deeply last field of nested structures, or the same type if
216 /// not a structure at all. Corresponds to the only possible unsized field,
217 /// and its type can be used to determine unsizing strategy.
219 /// This is parameterized over the normalization strategy (i.e. how to
220 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
221 /// function to indicate no normalization should take place.
223 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
225 pub fn struct_tail_with_normalize(
228 mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
229 // This is currently used to allow us to walk a ValTree
230 // in lockstep with the type in order to get the ValTree branch that
231 // corresponds to an unsized field.
232 mut f: impl FnMut() -> (),
234 let recursion_limit = self.recursion_limit();
235 for iteration in 0.. {
236 if !recursion_limit.value_within_limit(iteration) {
237 return self.ty_error_with_message(
239 &format!("reached the recursion limit finding the struct tail for {}", ty),
243 ty::Adt(def, substs) => {
244 if !def.is_struct() {
247 match def.non_enum_variant().fields.last() {
250 ty = field.ty(self, substs);
256 ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => {
261 ty::Tuple(_) => break,
264 let normalized = normalize(ty);
265 if ty == normalized {
280 /// Same as applying `struct_tail` on `source` and `target`, but only
281 /// keeps going as long as the two types are instances of the same
282 /// structure definitions.
283 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
284 /// whereas struct_tail produces `T`, and `Trait`, respectively.
286 /// Should only be called if the types have no inference variables and do
287 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
288 /// normalization attempt may cause compiler bugs.
289 pub fn struct_lockstep_tails_erasing_lifetimes(
293 param_env: ty::ParamEnv<'tcx>,
294 ) -> (Ty<'tcx>, Ty<'tcx>) {
296 tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
297 tcx.normalize_erasing_regions(param_env, ty)
301 /// Same as applying `struct_tail` on `source` and `target`, but only
302 /// keeps going as long as the two types are instances of the same
303 /// structure definitions.
304 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
305 /// whereas struct_tail produces `T`, and `Trait`, respectively.
307 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
309 pub fn struct_lockstep_tails_with_normalize(
313 normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
314 ) -> (Ty<'tcx>, Ty<'tcx>) {
315 let (mut a, mut b) = (source, target);
317 match (&a.kind(), &b.kind()) {
318 (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
319 if a_def == b_def && a_def.is_struct() =>
321 if let Some(f) = a_def.non_enum_variant().fields.last() {
322 a = f.ty(self, a_substs);
323 b = f.ty(self, b_substs);
328 (&ty::Tuple(a_tys), &ty::Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
329 if let Some(&a_last) = a_tys.last() {
331 b = *b_tys.last().unwrap();
336 (ty::Alias(..), _) | (_, ty::Alias(..)) => {
337 // If either side is a projection, attempt to
338 // progress via normalization. (Should be safe to
339 // apply to both sides as normalization is
341 let a_norm = normalize(a);
342 let b_norm = normalize(b);
343 if a == a_norm && b == b_norm {
357 /// Calculate the destructor of a given type.
358 pub fn calculate_dtor(
361 validate: impl Fn(Self, DefId) -> Result<(), ErrorGuaranteed>,
362 ) -> Option<ty::Destructor> {
363 let drop_trait = self.lang_items().drop_trait()?;
364 self.ensure().coherent_trait(drop_trait);
366 let ty = self.type_of(adt_did);
367 let (did, constness) = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
368 if let Some(item_id) = self.associated_item_def_ids(impl_did).first() {
369 if validate(self, impl_did).is_ok() {
370 return Some((*item_id, self.constness(impl_did)));
376 Some(ty::Destructor { did, constness })
379 /// Returns the set of types that are required to be alive in
380 /// order to run the destructor of `def` (see RFCs 769 and
383 /// Note that this returns only the constraints for the
384 /// destructor of `def` itself. For the destructors of the
385 /// contents, you need `adt_dtorck_constraint`.
386 pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::subst::GenericArg<'tcx>> {
387 let dtor = match def.destructor(self) {
389 debug!("destructor_constraints({:?}) - no dtor", def.did());
392 Some(dtor) => dtor.did,
395 let impl_def_id = self.parent(dtor);
396 let impl_generics = self.generics_of(impl_def_id);
398 // We have a destructor - all the parameters that are not
399 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
402 // We need to return the list of parameters from the ADTs
403 // generics/substs that correspond to impure parameters on the
404 // impl's generics. This is a bit ugly, but conceptually simple:
406 // Suppose our ADT looks like the following
408 // struct S<X, Y, Z>(X, Y, Z);
412 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
414 // We want to return the parameters (X, Y). For that, we match
415 // up the item-substs <X, Y, Z> with the substs on the impl ADT,
416 // <P1, P2, P0>, and then look up which of the impl substs refer to
417 // parameters marked as pure.
419 let impl_substs = match *self.type_of(impl_def_id).kind() {
420 ty::Adt(def_, substs) if def_ == def => substs,
424 let item_substs = match *self.type_of(def.did()).kind() {
425 ty::Adt(def_, substs) if def_ == def => substs,
429 let result = iter::zip(item_substs, impl_substs)
432 GenericArgKind::Lifetime(region) => match region.kind() {
433 ty::ReEarlyBound(ref ebr) => {
434 !impl_generics.region_param(ebr, self).pure_wrt_drop
436 // Error: not a region param
439 GenericArgKind::Type(ty) => match ty.kind() {
440 ty::Param(ref pt) => !impl_generics.type_param(pt, self).pure_wrt_drop,
441 // Error: not a type param
444 GenericArgKind::Const(ct) => match ct.kind() {
445 ty::ConstKind::Param(ref pc) => {
446 !impl_generics.const_param(pc, self).pure_wrt_drop
448 // Error: not a const param
453 .map(|(item_param, _)| item_param)
455 debug!("destructor_constraint({:?}) = {:?}", def.did(), result);
459 /// Checks whether each generic argument is simply a unique generic parameter.
460 pub fn uses_unique_generic_params(
462 substs: SubstsRef<'tcx>,
463 ignore_regions: IgnoreRegions,
464 ) -> Result<(), NotUniqueParam<'tcx>> {
465 let mut seen = GrowableBitSet::default();
468 GenericArgKind::Lifetime(lt) => {
469 if ignore_regions == IgnoreRegions::No {
470 let ty::ReEarlyBound(p) = lt.kind() else {
471 return Err(NotUniqueParam::NotParam(lt.into()))
473 if !seen.insert(p.index) {
474 return Err(NotUniqueParam::DuplicateParam(lt.into()));
478 GenericArgKind::Type(t) => match t.kind() {
480 if !seen.insert(p.index) {
481 return Err(NotUniqueParam::DuplicateParam(t.into()));
484 _ => return Err(NotUniqueParam::NotParam(t.into())),
486 GenericArgKind::Const(c) => match c.kind() {
487 ty::ConstKind::Param(p) => {
488 if !seen.insert(p.index) {
489 return Err(NotUniqueParam::DuplicateParam(c.into()));
492 _ => return Err(NotUniqueParam::NotParam(c.into())),
500 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
501 /// that closures have a `DefId`, but the closure *expression* also
502 /// has a `HirId` that is located within the context where the
503 /// closure appears (and, sadly, a corresponding `NodeId`, since
504 /// those are not yet phased out). The parent of the closure's
505 /// `DefId` will also be the context where it appears.
506 pub fn is_closure(self, def_id: DefId) -> bool {
507 matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
510 /// Returns `true` if `def_id` refers to a definition that does not have its own
511 /// type-checking context, i.e. closure, generator or inline const.
512 pub fn is_typeck_child(self, def_id: DefId) -> bool {
514 self.def_kind(def_id),
515 DefKind::Closure | DefKind::Generator | DefKind::InlineConst
519 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
520 pub fn is_trait(self, def_id: DefId) -> bool {
521 self.def_kind(def_id) == DefKind::Trait
524 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
525 /// and `false` otherwise.
526 pub fn is_trait_alias(self, def_id: DefId) -> bool {
527 self.def_kind(def_id) == DefKind::TraitAlias
530 /// Returns `true` if this `DefId` refers to the implicit constructor for
531 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
532 pub fn is_constructor(self, def_id: DefId) -> bool {
533 matches!(self.def_kind(def_id), DefKind::Ctor(..))
536 /// Given the `DefId`, returns the `DefId` of the innermost item that
537 /// has its own type-checking context or "inference environment".
539 /// For example, a closure has its own `DefId`, but it is type-checked
540 /// with the containing item. Similarly, an inline const block has its
541 /// own `DefId` but it is type-checked together with the containing item.
543 /// Therefore, when we fetch the
544 /// `typeck` the closure, for example, we really wind up
545 /// fetching the `typeck` the enclosing fn item.
546 pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
547 let mut def_id = def_id;
548 while self.is_typeck_child(def_id) {
549 def_id = self.parent(def_id);
554 /// Given the `DefId` and substs a closure, creates the type of
555 /// `self` argument that the closure expects. For example, for a
556 /// `Fn` closure, this would return a reference type `&T` where
557 /// `T = closure_ty`.
559 /// Returns `None` if this closure's kind has not yet been inferred.
560 /// This should only be possible during type checking.
562 /// Note that the return value is a late-bound region and hence
563 /// wrapped in a binder.
564 pub fn closure_env_ty(
566 closure_def_id: DefId,
567 closure_substs: SubstsRef<'tcx>,
568 env_region: ty::RegionKind<'tcx>,
569 ) -> Option<Ty<'tcx>> {
570 let closure_ty = self.mk_closure(closure_def_id, closure_substs);
571 let closure_kind_ty = closure_substs.as_closure().kind_ty();
572 let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
573 let env_ty = match closure_kind {
574 ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
575 ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
576 ty::ClosureKind::FnOnce => closure_ty,
581 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
583 pub fn is_static(self, def_id: DefId) -> bool {
584 matches!(self.def_kind(def_id), DefKind::Static(_))
588 pub fn static_mutability(self, def_id: DefId) -> Option<hir::Mutability> {
589 if let DefKind::Static(mt) = self.def_kind(def_id) { Some(mt) } else { None }
592 /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
593 pub fn is_thread_local_static(self, def_id: DefId) -> bool {
594 self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
597 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
599 pub fn is_mutable_static(self, def_id: DefId) -> bool {
600 self.static_mutability(def_id) == Some(hir::Mutability::Mut)
603 /// Get the type of the pointer to the static that we use in MIR.
604 pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
605 // Make sure that any constants in the static's type are evaluated.
606 let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
608 // Make sure that accesses to unsafe statics end up using raw pointers.
609 // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
610 if self.is_mutable_static(def_id) {
611 self.mk_mut_ptr(static_ty)
612 } else if self.is_foreign_item(def_id) {
613 self.mk_imm_ptr(static_ty)
615 self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
619 /// Expands the given impl trait type, stopping if the type is recursive.
620 #[instrument(skip(self), level = "debug", ret)]
621 pub fn try_expand_impl_trait_type(
624 substs: SubstsRef<'tcx>,
625 ) -> Result<Ty<'tcx>, Ty<'tcx>> {
626 let mut visitor = OpaqueTypeExpander {
627 seen_opaque_tys: FxHashSet::default(),
628 expanded_cache: FxHashMap::default(),
629 primary_def_id: Some(def_id),
630 found_recursion: false,
631 found_any_recursion: false,
632 check_recursion: true,
636 let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
637 if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
640 pub fn bound_type_of(self, def_id: DefId) -> ty::EarlyBinder<Ty<'tcx>> {
641 ty::EarlyBinder(self.type_of(def_id))
644 pub fn bound_trait_impl_trait_tys(
647 ) -> ty::EarlyBinder<Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed>> {
648 ty::EarlyBinder(self.collect_trait_impl_trait_tys(def_id))
651 pub fn bound_fn_sig(self, def_id: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> {
652 ty::EarlyBinder(self.fn_sig(def_id))
655 pub fn bound_impl_trait_ref(
658 ) -> Option<ty::EarlyBinder<ty::TraitRef<'tcx>>> {
659 self.impl_trait_ref(def_id).map(|i| ty::EarlyBinder(i))
662 pub fn bound_explicit_item_bounds(
665 ) -> ty::EarlyBinder<&'tcx [(ty::Predicate<'tcx>, rustc_span::Span)]> {
666 ty::EarlyBinder(self.explicit_item_bounds(def_id))
669 pub fn bound_item_bounds(
672 ) -> ty::EarlyBinder<&'tcx ty::List<ty::Predicate<'tcx>>> {
673 ty::EarlyBinder(self.item_bounds(def_id))
676 pub fn bound_const_param_default(self, def_id: DefId) -> ty::EarlyBinder<ty::Const<'tcx>> {
677 ty::EarlyBinder(self.const_param_default(def_id))
680 pub fn bound_predicates_of(
683 ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
684 ty::EarlyBinder(self.predicates_of(def_id))
687 pub fn bound_explicit_predicates_of(
690 ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
691 ty::EarlyBinder(self.explicit_predicates_of(def_id))
694 pub fn bound_impl_subject(self, def_id: DefId) -> ty::EarlyBinder<ty::ImplSubject<'tcx>> {
695 ty::EarlyBinder(self.impl_subject(def_id))
698 /// Returns names of captured upvars for closures and generators.
700 /// Here are some examples:
701 /// - `name__field1__field2` when the upvar is captured by value.
702 /// - `_ref__name__field` when the upvar is captured by reference.
704 /// For generators this only contains upvars that are shared by all states.
705 pub fn closure_saved_names_of_captured_variables(
708 ) -> SmallVec<[String; 16]> {
709 let body = self.optimized_mir(def_id);
714 let is_ref = match var.value {
715 mir::VarDebugInfoContents::Place(place)
716 if place.local == mir::Local::new(1) =>
718 // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
719 // implies whether the variable is captured by value or by reference.
720 matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
724 let prefix = if is_ref { "_ref__" } else { "" };
725 Some(prefix.to_owned() + var.name.as_str())
730 // FIXME(eddyb) maybe precompute this? Right now it's computed once
731 // per generator monomorphization, but it doesn't depend on substs.
732 pub fn generator_layout_and_saved_local_names(
736 &'tcx ty::GeneratorLayout<'tcx>,
737 IndexVec<mir::GeneratorSavedLocal, Option<rustc_span::Symbol>>,
740 let body = tcx.optimized_mir(def_id);
741 let generator_layout = body.generator_layout().unwrap();
742 let mut generator_saved_local_names =
743 IndexVec::from_elem(None, &generator_layout.field_tys);
745 let state_arg = mir::Local::new(1);
746 for var in &body.var_debug_info {
747 let mir::VarDebugInfoContents::Place(place) = &var.value else { continue };
748 if place.local != state_arg {
751 match place.projection[..] {
753 // Deref of the `Pin<&mut Self>` state argument.
754 mir::ProjectionElem::Field(..),
755 mir::ProjectionElem::Deref,
756 // Field of a variant of the state.
757 mir::ProjectionElem::Downcast(_, variant),
758 mir::ProjectionElem::Field(field, _),
760 let name = &mut generator_saved_local_names
761 [generator_layout.variant_fields[variant][field]];
763 name.replace(var.name);
769 (generator_layout, generator_saved_local_names)
773 impl<'tcx> TyCtxtAt<'tcx> {
774 pub fn bound_type_of(self, def_id: DefId) -> ty::EarlyBinder<Ty<'tcx>> {
775 ty::EarlyBinder(self.type_of(def_id))
779 struct OpaqueTypeExpander<'tcx> {
780 // Contains the DefIds of the opaque types that are currently being
781 // expanded. When we expand an opaque type we insert the DefId of
782 // that type, and when we finish expanding that type we remove the
784 seen_opaque_tys: FxHashSet<DefId>,
785 // Cache of all expansions we've seen so far. This is a critical
786 // optimization for some large types produced by async fn trees.
787 expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
788 primary_def_id: Option<DefId>,
789 found_recursion: bool,
790 found_any_recursion: bool,
791 /// Whether or not to check for recursive opaque types.
792 /// This is `true` when we're explicitly checking for opaque type
793 /// recursion, and 'false' otherwise to avoid unnecessary work.
794 check_recursion: bool,
798 impl<'tcx> OpaqueTypeExpander<'tcx> {
799 fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
800 if self.found_any_recursion {
803 let substs = substs.fold_with(self);
804 if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
805 let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
806 Some(expanded_ty) => *expanded_ty,
808 let generic_ty = self.tcx.bound_type_of(def_id);
809 let concrete_ty = generic_ty.subst(self.tcx, substs);
810 let expanded_ty = self.fold_ty(concrete_ty);
811 self.expanded_cache.insert((def_id, substs), expanded_ty);
815 if self.check_recursion {
816 self.seen_opaque_tys.remove(&def_id);
820 // If another opaque type that we contain is recursive, then it
821 // will report the error, so we don't have to.
822 self.found_any_recursion = true;
823 self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
829 impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
830 fn tcx(&self) -> TyCtxt<'tcx> {
834 fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
835 if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) = *t.kind() {
836 self.expand_opaque_ty(def_id, substs).unwrap_or(t)
837 } else if t.has_opaque_types() {
838 t.super_fold_with(self)
845 impl<'tcx> Ty<'tcx> {
846 /// Returns the maximum value for the given numeric type (including `char`s)
847 /// or returns `None` if the type is not numeric.
848 pub fn numeric_max_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
849 let val = match self.kind() {
850 ty::Int(_) | ty::Uint(_) => {
851 let (size, signed) = int_size_and_signed(tcx, self);
853 if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
856 ty::Char => Some(std::char::MAX as u128),
857 ty::Float(fty) => Some(match fty {
858 ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
859 ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
864 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
867 /// Returns the minimum value for the given numeric type (including `char`s)
868 /// or returns `None` if the type is not numeric.
869 pub fn numeric_min_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
870 let val = match self.kind() {
871 ty::Int(_) | ty::Uint(_) => {
872 let (size, signed) = int_size_and_signed(tcx, self);
873 let val = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
877 ty::Float(fty) => Some(match fty {
878 ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
879 ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
884 val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
887 /// Checks whether values of this type `T` are *moved* or *copied*
888 /// when referenced -- this amounts to a check for whether `T:
889 /// Copy`, but note that we **don't** consider lifetimes when
890 /// doing this check. This means that we may generate MIR which
891 /// does copies even when the type actually doesn't satisfy the
892 /// full requirements for the `Copy` trait (cc #29149) -- this
893 /// winds up being reported as an error during NLL borrow check.
894 pub fn is_copy_modulo_regions(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
895 self.is_trivially_pure_clone_copy() || tcx.is_copy_raw(param_env.and(self))
898 /// Checks whether values of this type `T` have a size known at
899 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
900 /// for the purposes of this check, so it can be an
901 /// over-approximation in generic contexts, where one can have
902 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
903 /// actually carry lifetime requirements.
904 pub fn is_sized(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
905 self.is_trivially_sized(tcx) || tcx.is_sized_raw(param_env.and(self))
908 /// Checks whether values of this type `T` implement the `Freeze`
909 /// trait -- frozen types are those that do not contain an
910 /// `UnsafeCell` anywhere. This is a language concept used to
911 /// distinguish "true immutability", which is relevant to
912 /// optimization as well as the rules around static values. Note
913 /// that the `Freeze` trait is not exposed to end users and is
914 /// effectively an implementation detail.
915 pub fn is_freeze(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
916 self.is_trivially_freeze() || tcx.is_freeze_raw(param_env.and(self))
919 /// Fast path helper for testing if a type is `Freeze`.
921 /// Returning true means the type is known to be `Freeze`. Returning
922 /// `false` means nothing -- could be `Freeze`, might not be.
923 fn is_trivially_freeze(self) -> bool {
936 | ty::FnPtr(_) => true,
937 ty::Tuple(fields) => fields.iter().all(Self::is_trivially_freeze),
938 ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
945 | ty::GeneratorWitness(_)
949 | ty::Placeholder(_) => false,
953 /// Checks whether values of this type `T` implement the `Unpin` trait.
954 pub fn is_unpin(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
955 self.is_trivially_unpin() || tcx.is_unpin_raw(param_env.and(self))
958 /// Fast path helper for testing if a type is `Unpin`.
960 /// Returning true means the type is known to be `Unpin`. Returning
961 /// `false` means nothing -- could be `Unpin`, might not be.
962 fn is_trivially_unpin(self) -> bool {
975 | ty::FnPtr(_) => true,
976 ty::Tuple(fields) => fields.iter().all(Self::is_trivially_unpin),
977 ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_unpin(),
984 | ty::GeneratorWitness(_)
988 | ty::Placeholder(_) => false,
992 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
993 /// non-copy and *might* have a destructor attached; if it returns
994 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
996 /// (Note that this implies that if `ty` has a destructor attached,
997 /// then `needs_drop` will definitely return `true` for `ty`.)
999 /// Note that this method is used to check eligible types in unions.
1001 pub fn needs_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
1002 // Avoid querying in simple cases.
1003 match needs_drop_components(self, &tcx.data_layout) {
1004 Err(AlwaysRequiresDrop) => true,
1006 let query_ty = match *components {
1008 // If we've got a single component, call the query with that
1009 // to increase the chance that we hit the query cache.
1010 [component_ty] => component_ty,
1014 // This doesn't depend on regions, so try to minimize distinct
1016 // If normalization fails, we just use `query_ty`.
1018 tcx.try_normalize_erasing_regions(param_env, query_ty).unwrap_or(query_ty);
1020 tcx.needs_drop_raw(param_env.and(query_ty))
1025 /// Checks if `ty` has a significant drop.
1027 /// Note that this method can return false even if `ty` has a destructor
1028 /// attached; even if that is the case then the adt has been marked with
1029 /// the attribute `rustc_insignificant_dtor`.
1031 /// Note that this method is used to check for change in drop order for
1032 /// 2229 drop reorder migration analysis.
1034 pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
1035 // Avoid querying in simple cases.
1036 match needs_drop_components(self, &tcx.data_layout) {
1037 Err(AlwaysRequiresDrop) => true,
1039 let query_ty = match *components {
1041 // If we've got a single component, call the query with that
1042 // to increase the chance that we hit the query cache.
1043 [component_ty] => component_ty,
1047 // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
1048 // context, or *something* like that, but for now just avoid passing inference
1049 // variables to queries that can't cope with them. Instead, conservatively
1050 // return "true" (may change drop order).
1051 if query_ty.needs_infer() {
1055 // This doesn't depend on regions, so try to minimize distinct
1057 let erased = tcx.normalize_erasing_regions(param_env, query_ty);
1058 tcx.has_significant_drop_raw(param_env.and(erased))
1063 /// Returns `true` if equality for this type is both reflexive and structural.
1065 /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
1067 /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
1068 /// types, equality for the type as a whole is structural when it is the same as equality
1069 /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
1070 /// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for
1073 /// This function is "shallow" because it may return `true` for a composite type whose fields
1074 /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
1075 /// because equality for arrays is determined by the equality of each array element. If you
1076 /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
1077 /// down, you will need to use a type visitor.
1079 pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool {
1081 // Look for an impl of both `PartialStructuralEq` and `StructuralEq`.
1082 ty::Adt(..) => tcx.has_structural_eq_impls(self),
1084 // Primitive types that satisfy `Eq`.
1085 ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true,
1087 // Composite types that satisfy `Eq` when all of their fields do.
1089 // Because this function is "shallow", we return `true` for these composites regardless
1090 // of the type(s) contained within.
1091 ty::Ref(..) | ty::Array(..) | ty::Slice(_) | ty::Tuple(..) => true,
1093 // Raw pointers use bitwise comparison.
1094 ty::RawPtr(_) | ty::FnPtr(_) => true,
1096 // Floating point numbers are not `Eq`.
1097 ty::Float(_) => false,
1099 // Conservatively return `false` for all others...
1101 // Anonymous function types
1102 ty::FnDef(..) | ty::Closure(..) | ty::Dynamic(..) | ty::Generator(..) => false,
1104 // Generic or inferred types
1106 // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
1107 // called for known, fully-monomorphized types.
1108 ty::Alias(..) | ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) => {
1112 ty::Foreign(_) | ty::GeneratorWitness(..) | ty::Error(_) => false,
1116 /// Peel off all reference types in this type until there are none left.
1118 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
1123 /// - `&'a mut u8` -> `u8`
1124 /// - `&'a &'b u8` -> `u8`
1125 /// - `&'a *const &'b u8 -> *const &'b u8`
1126 pub fn peel_refs(self) -> Ty<'tcx> {
1128 while let ty::Ref(_, inner_ty, _) = ty.kind() {
1135 pub fn outer_exclusive_binder(self) -> ty::DebruijnIndex {
1136 self.0.outer_exclusive_binder
1140 pub enum ExplicitSelf<'tcx> {
1142 ByReference(ty::Region<'tcx>, hir::Mutability),
1143 ByRawPointer(hir::Mutability),
1148 impl<'tcx> ExplicitSelf<'tcx> {
1149 /// Categorizes an explicit self declaration like `self: SomeType`
1150 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
1152 /// This is mainly used to require the arbitrary_self_types feature
1153 /// in the case of `Other`, to improve error messages in the common cases,
1154 /// and to make `Other` non-object-safe.
1158 /// ```ignore (illustrative)
1159 /// impl<'a> Foo for &'a T {
1160 /// // Legal declarations:
1161 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
1162 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
1163 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
1164 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
1166 /// // Invalid cases will be caught by `check_method_receiver`:
1167 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
1168 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
1169 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
1173 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
1175 P: Fn(Ty<'tcx>) -> bool,
1177 use self::ExplicitSelf::*;
1179 match *self_arg_ty.kind() {
1180 _ if is_self_ty(self_arg_ty) => ByValue,
1181 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
1182 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
1183 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
1189 /// Returns a list of types such that the given type needs drop if and only if
1190 /// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
1191 /// this type always needs drop.
1192 pub fn needs_drop_components<'tcx>(
1194 target_layout: &TargetDataLayout,
1195 ) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
1197 ty::Infer(ty::FreshIntTy(_))
1198 | ty::Infer(ty::FreshFloatTy(_))
1207 | ty::GeneratorWitness(..)
1210 | ty::Str => Ok(SmallVec::new()),
1212 // Foreign types can never have destructors.
1213 ty::Foreign(..) => Ok(SmallVec::new()),
1215 ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
1217 ty::Slice(ty) => needs_drop_components(*ty, target_layout),
1218 ty::Array(elem_ty, size) => {
1219 match needs_drop_components(*elem_ty, target_layout) {
1220 Ok(v) if v.is_empty() => Ok(v),
1221 res => match size.kind().try_to_bits(target_layout.pointer_size) {
1222 // Arrays of size zero don't need drop, even if their element
1224 Some(0) => Ok(SmallVec::new()),
1226 // We don't know which of the cases above we are in, so
1227 // return the whole type and let the caller decide what to
1229 None => Ok(smallvec![ty]),
1233 // If any field needs drop, then the whole tuple does.
1234 ty::Tuple(fields) => fields.iter().try_fold(SmallVec::new(), move |mut acc, elem| {
1235 acc.extend(needs_drop_components(elem, target_layout)?);
1239 // These require checking for `Copy` bounds or `Adt` destructors.
1244 | ty::Placeholder(..)
1247 | ty::Generator(..) => Ok(smallvec![ty]),
1251 pub fn is_trivially_const_drop(ty: Ty<'_>) -> bool {
1258 | ty::Infer(ty::IntVar(_))
1259 | ty::Infer(ty::FloatVar(_))
1266 | ty::Foreign(_) => true,
1273 | ty::Placeholder(_)
1274 | ty::Infer(_) => false,
1276 // Not trivial because they have components, and instead of looking inside,
1277 // we'll just perform trait selection.
1278 ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(_) | ty::Adt(..) => false,
1280 ty::Array(ty, _) | ty::Slice(ty) => is_trivially_const_drop(ty),
1282 ty::Tuple(tys) => tys.iter().all(|ty| is_trivially_const_drop(ty)),
1286 /// Does the equivalent of
1287 /// ```ignore (ilustrative)
1288 /// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1289 /// folder.tcx().intern_*(&v)
1291 pub fn fold_list<'tcx, F, T>(
1292 list: &'tcx ty::List<T>,
1294 intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
1295 ) -> Result<&'tcx ty::List<T>, F::Error>
1297 F: FallibleTypeFolder<'tcx>,
1298 T: TypeFoldable<'tcx> + PartialEq + Copy,
1300 let mut iter = list.iter();
1301 // Look for the first element that changed
1302 match iter.by_ref().enumerate().find_map(|(i, t)| match t.try_fold_with(folder) {
1303 Ok(new_t) if new_t == t => None,
1304 new_t => Some((i, new_t)),
1306 Some((i, Ok(new_t))) => {
1307 // An element changed, prepare to intern the resulting list
1308 let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
1309 new_list.extend_from_slice(&list[..i]);
1310 new_list.push(new_t);
1312 new_list.push(t.try_fold_with(folder)?)
1314 Ok(intern(folder.tcx(), &new_list))
1316 Some((_, Err(err))) => {
/// Marker error type returned by [`needs_drop_components`] when the inspected
/// type always needs drop (e.g. trait objects), so there is no useful
/// component list to report.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub struct AlwaysRequiresDrop;
1326 /// Reveals all opaque types in the given value, replacing them
1327 /// with their underlying types.
1328 pub fn reveal_opaque_types_in_bounds<'tcx>(
1330 val: &'tcx ty::List<ty::Predicate<'tcx>>,
1331 ) -> &'tcx ty::List<ty::Predicate<'tcx>> {
1332 let mut visitor = OpaqueTypeExpander {
1333 seen_opaque_tys: FxHashSet::default(),
1334 expanded_cache: FxHashMap::default(),
1335 primary_def_id: None,
1336 found_recursion: false,
1337 found_any_recursion: false,
1338 check_recursion: false,
1341 val.fold_with(&mut visitor)
1344 /// Determines whether an item is annotated with `doc(hidden)`.
1345 pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
1346 tcx.get_attrs(def_id, sym::doc)
1347 .filter_map(|attr| attr.meta_item_list())
1348 .any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
1351 /// Determines whether an item is annotated with `doc(notable_trait)`.
1352 pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
1353 tcx.get_attrs(def_id, sym::doc)
1354 .filter_map(|attr| attr.meta_item_list())
1355 .any(|items| items.iter().any(|item| item.has_name(sym::notable_trait)))
1358 /// Determines whether an item is an intrinsic by Abi.
1359 pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
1360 matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
1363 pub fn provide(providers: &mut ty::query::Providers) {
1364 *providers = ty::query::Providers {
1365 reveal_opaque_types_in_bounds,
1367 is_doc_notable_trait,