use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::{
    self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
    TyCtxt, TypeVisitable,
};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp::{self, Ordering};
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
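    ///
    /// Illustrative example (not in the original source): with no `#[repr]`
    /// attribute, a discriminant range of `-1..=100` fits in `I8` when signed,
    /// and `repr(Rust)` enums start from `I8`, so the result is `(I8, true)`.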
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
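// Illustrative note (not in the original source): for `&[u8]`, field
// `FAT_PTR_ADDR` is the `*const u8` data pointer and field `FAT_PTR_EXTRA` is
// the `usize` length; for `&dyn Trait`, they are the data pointer and the
// vtable pointer respectively.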

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer,
///   which caps the lane count at 2^15 = 32768.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = cx.layout_of_uncached(ty)?;
    let layout = TyAndLayout { ty, layout };

    cx.record_layout_for_printing(layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}

#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
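// Illustrative example (not in the original source): `invert_mapping(&[2, 0, 1])`
// returns `[1, 2, 0]`; `map[0] = 2` means field 0 lives at memory position 2,
// so the inverse maps position 2 back to field 0.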
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);
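        // Illustrative example (not in the original source): for `a = i8` and
        // `b = i32` on a target where `i32` is 4-byte aligned, `b_offset` is 4
        // and the total `size` rounds up to 8.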

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition, we can
            // shuffle the field ordering to try to catch code making assumptions
            // about layouts we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions.field_shuffle_seed` is a deterministic seed that we
                // can use to randomize the field ordering with.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
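        // Illustrative example (not in the original source): with reordering
        // enabled on a typical target, a struct with fields `(u8, u32, u8)` is
        // laid out as `u32, u8, u8`, i.e. `inverse_memory_index == [1, 0, 2]`.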

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        i,
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then inverse_memory_index[0]
        // is 5, and memory_index[5] should be 0.
        // Note: if we didn't optimize, inverse_memory_index is still the identity
        // permutation, so it already equals memory_index and needs no inversion.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // Find the non-ZST fields: for the optimizations below, all other
            // fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

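            // Note (illustrative, not in the original source): unlike `dyn Trait`,
            // which is unsized and only used behind pointers, `dyn* Trait` is a
            // sized (data, vtable) pair laid out like a scalar pair.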
            ty::Dynamic(_, _, ty::DynStar) => {
                let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
                data.valid_range_mut().start = 0;
                let mut vtable = scalar_unit(Pointer);
                vtable.valid_range_mut().start = 1;
                tcx.intern_layout(self.scalar_pair(data, vtable))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15), a
                // fixed maximum that backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
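                // Illustrative example (not in the original source): in
                // `enum E { A(!), B(u32, !) }`, variant `A` is absent (uninhabited
                // with only ZST fields) while `B` is uninhabited but present, so
                // space is still reserved for its `u32` field.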
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };

                    if def.is_unsafe_cell() {
                        let hide_niches = |scalar: &mut _| match scalar {
                            Scalar::Initialized { value, valid_range } => {
                                *valid_range = WrappingRange::full(value.size(dl))
                            }
                            // Already doesn't have any niches
                            Scalar::Union { .. } => {}
                        };
                        match &mut st.abi {
                            Abi::Uninhabited => {}
                            Abi::Scalar(scalar) => hide_niches(scalar),
                            Abi::ScalarPair(a, b) => {
                                hide_niches(a);
                                hide_niches(b);
                            }
                            Abi::Vector { element, count: _ } => hide_niches(element),
                            Abi::Aggregate { sized: _ } => {}
                        }
                        st.largest_niche = None;
                        return Ok(tcx.intern_layout(st));
                    }

                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // Until we've decided whether to use the tagged or
                // niche filling LayoutS, we don't want to intern the
                // variant layouts, so we can't store them in the
                // overall LayoutS. Store the overall LayoutS
                // and the variant LayoutSs here until then.
                struct TmpLayout<'tcx> {
                    layout: LayoutS<'tcx>,
                    variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
                }

                let calculate_niche_filling_layout =
                    || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                        // The current code for niche-filling relies on variant indices
                        // instead of actual discriminants, so enums with
                        // explicit discriminants (RFC #2363) would misbehave.
                        if def.repr().inhibit_enum_layout_opt()
                            || def
                                .variants()
                                .iter_enumerated()
                                .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                        {
                            return Ok(None);
                        }

                        if variants.len() < 2 {
                            return Ok(None);
                        }

                        let mut align = dl.aggregate_align;
                        let mut variant_layouts = variants
                            .iter_enumerated()
                            .map(|(j, v)| {
                                let mut st = self.univariant_uninterned(
                                    ty,
                                    v,
                                    &def.repr(),
                                    StructKind::AlwaysSized,
                                )?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            })
                            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                        let largest_variant_index = match variant_layouts
                            .iter_enumerated()
                            .max_by_key(|(_i, layout)| layout.size.bytes())
                            .map(|(i, _layout)| i)
                        {
                            None => return Ok(None),
                            Some(i) => i,
                        };

                        let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                        let needs_disc = |index: VariantIdx| {
                            index != largest_variant_index && !absent(&variants[index])
                        };
                        let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                            ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                        let count = niche_variants.size_hint().1.unwrap() as u128;
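                        // Illustrative example (not in the original source): for
                        // `Option<&T>`, `Some` is the largest (untagged) variant and
                        // the reference's non-null range supplies the niche, so
                        // `None` is encoded as the all-zeros pointer value.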
1119
1120                         // Find the field with the largest niche
1121                         let (field_index, niche, (niche_start, niche_scalar)) = match variants
1122                             [largest_variant_index]
1123                             .iter()
1124                             .enumerate()
1125                             .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1126                             .max_by_key(|(_, niche)| niche.available(dl))
1127                             .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1128                         {
1129                             None => return Ok(None),
1130                             Some(x) => x,
1131                         };
1132
1133                         let niche_offset = niche.offset
1134                             + variant_layouts[largest_variant_index].fields.offset(field_index);
1135                         let niche_size = niche.value.size(dl);
1136                         let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1137
1138                         let all_variants_fit =
1139                             variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1140                                 if i == largest_variant_index {
1141                                     return true;
1142                                 }
1143
1144                                 layout.largest_niche = None;
1145
1146                                 if layout.size <= niche_offset {
1147                                     // This variant will fit before the niche.
1148                                     return true;
1149                                 }
1150
1151                                 // Determine if it'll fit after the niche.
1152                                 let this_align = layout.align.abi;
1153                                 let this_offset = (niche_offset + niche_size).align_to(this_align);
1154
1155                                 if this_offset + layout.size > size {
1156                                     return false;
1157                                 }
1158
1159                                 // It'll fit, but we need to make some adjustments.
1160                                 match layout.fields {
1161                                     FieldsShape::Arbitrary { ref mut offsets, .. } => {
1162                                         for (j, offset) in offsets.iter_mut().enumerate() {
1163                                             if !variants[i][j].is_zst() {
1164                                                 *offset += this_offset;
1165                                             }
1166                                         }
1167                                     }
1168                                     _ => {
1169                                         panic!("Layout of fields should be Arbitrary for variants")
1170                                     }
1171                                 }
1172
1173                                 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1174                                 if !layout.abi.is_uninhabited() {
1175                                     layout.abi = Abi::Aggregate { sized: true };
1176                                 }
1177                                 layout.size += this_offset;
1178
1179                                 true
1180                             });
1181
1182                         if !all_variants_fit {
1183                             return Ok(None);
1184                         }
1185
1186                         let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1187
1188                         let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1189                             i == largest_variant_index || layout.size == Size::ZERO
1190                         });
1191                         let same_size = size == variant_layouts[largest_variant_index].size;
1192                         let same_align = align == variant_layouts[largest_variant_index].align;
1193
1194                         let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1195                             Abi::Uninhabited
1196                         } else if same_size && same_align && others_zst {
1197                             match variant_layouts[largest_variant_index].abi {
1198                                 // When the total alignment and size match, we can use the
1199                                 // same ABI as the scalar variant with the reserved niche.
1200                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1201                                 Abi::ScalarPair(first, second) => {
1202                                     // Only the niche is guaranteed to be initialised,
1203                                     // so use union layouts for the other primitive.
1204                                     if niche_offset == Size::ZERO {
1205                                         Abi::ScalarPair(niche_scalar, second.to_union())
1206                                     } else {
1207                                         Abi::ScalarPair(first.to_union(), niche_scalar)
1208                                     }
1209                                 }
1210                                 _ => Abi::Aggregate { sized: true },
1211                             }
1212                         } else {
1213                             Abi::Aggregate { sized: true }
1214                         };
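                              // Illustrative outcome (a sketch, not normative): `Option<NonZeroU32>`
                              // takes the `Abi::Scalar(niche_scalar)` arm above, since the `None`
                              // variant is a ZST and the niche (the value 0) lives in the same
                              // 4-byte scalar, so the enum is passed around exactly like a bare `u32`.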
1215
1216                         let layout = LayoutS {
1217                             variants: Variants::Multiple {
1218                                 tag: niche_scalar,
1219                                 tag_encoding: TagEncoding::Niche {
1220                                     untagged_variant: largest_variant_index,
1221                                     niche_variants,
1222                                     niche_start,
1223                                 },
1224                                 tag_field: 0,
1225                                 variants: IndexVec::new(),
1226                             },
1227                             fields: FieldsShape::Arbitrary {
1228                                 offsets: vec![niche_offset],
1229                                 memory_index: vec![0],
1230                             },
1231                             abi,
1232                             largest_niche,
1233                             size,
1234                             align,
1235                         };
1236
1237                         Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1238                     };
1239
1240                 let niche_filling_layout = calculate_niche_filling_layout()?;
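                      // A concrete example of what niche filling buys (illustrative): for
                      // `enum E { A(bool), B }`, the `bool` in `A` only uses the values 0
                      // and 1, so `B` can be encoded as the spare value 2 in the same byte
                      // and `E` occupies a single byte with no separate tag field.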
1241
1242                 let (mut min, mut max) = (i128::MAX, i128::MIN);
1243                 let discr_type = def.repr().discr_type();
1244                 let bits = Integer::from_attr(self, discr_type).size().bits();
1245                 for (i, discr) in def.discriminants(tcx) {
1246                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1247                         continue;
1248                     }
1249                     let mut x = discr.val as i128;
1250                     if discr_type.is_signed() {
1251                         // Sign-extend the raw representation to an i128.
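                              // (E.g. with an 8-bit discriminant type, a raw value of 0xFF is
                              // shifted into the top byte and arithmetic-shifted back, yielding -1.)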
1252                         x = (x << (128 - bits)) >> (128 - bits);
1253                     }
1254                     if x < min {
1255                         min = x;
1256                     }
1257                     if x > max {
1258                         max = x;
1259                     }
1260                 }
1261                 // We might have no inhabited variants, so pretend there's at least one.
1262                 if (min, max) == (i128::MAX, i128::MIN) {
1263                     min = 0;
1264                     max = 0;
1265                 }
1266                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1267                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1268
1269                 let mut align = dl.aggregate_align;
1270                 let mut size = Size::ZERO;
1271
1272                 // We're interested in the smallest alignment, so start large.
1273                 let mut start_align = Align::from_bytes(256).unwrap();
1274                 assert_eq!(Integer::for_align(dl, start_align), None);
1275
1276                 // repr(C) on an enum tells us to make a (tag, union) layout,
1277                 // so we need to grow the prefix alignment to be at least
1278                 // the alignment of the union. (This value is used both for
1279                 // determining the alignment of the overall enum, and for
1280                 // determining the alignment of the payload after the tag.)
1281                 let mut prefix_align = min_ity.align(dl).abi;
1282                 if def.repr().c() {
1283                     for fields in &variants {
1284                         for field in fields {
1285                             prefix_align = prefix_align.max(field.align.abi);
1286                         }
1287                     }
1288                 }
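                      // Sketch of the effect (assuming a 64-bit target): for
                      // `#[repr(C)] enum E { A(u8), B(u64) }`, the `u64` field bumps
                      // `prefix_align` to 8, so every payload starts at offset 8 and the
                      // enum is laid out like the C `struct { tag; union { A; B; } }` above.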
1289
1290                 // Create the set of structs that represent each variant.
1291                 let mut layout_variants = variants
1292                     .iter_enumerated()
1293                     .map(|(i, field_layouts)| {
1294                         let mut st = self.univariant_uninterned(
1295                             ty,
1296                             &field_layouts,
1297                             &def.repr(),
1298                             StructKind::Prefixed(min_ity.size(), prefix_align),
1299                         )?;
1300                         st.variants = Variants::Single { index: i };
1301                         // Find the first field we can't move later
1302                         // to make room for a larger discriminant.
1303                         for field in
1304                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1305                         {
1306                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1307                                 start_align = start_align.min(field.align.abi);
1308                                 break;
1309                             }
1310                         }
1311                         size = cmp::max(size, st.size);
1312                         align = align.max(st.align);
1313                         Ok(st)
1314                     })
1315                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1316
1317                 // Align the maximum variant size to the largest alignment.
1318                 size = size.align_to(align.abi);
1319
1320                 if size.bytes() >= dl.obj_size_bound() {
1321                     return Err(LayoutError::SizeOverflow(ty));
1322                 }
1323
1324                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1325                 if typeck_ity < min_ity {
1326                     // It is a bug if layout decided on a greater discriminant size than
1327                     // typeck did at this point (based on the values the discriminant can
1328                     // take on), mostly because this discriminant will be loaded and then
1329                     // stored into a variable of the type computed by typeck. Consider a
1330                     // buggy case: typeck decided on a byte-sized discriminant, but layout
1331                     // thinks we need 16 bits to store all the discriminant values. Then,
1332                     // in codegen, storing this 16-bit discriminant into an 8-bit temporary
1333                     // would discard some of the bits needed to represent it (or layout is
1334                     // wrong in thinking it needs 16 bits).
1335                     bug!(
1336                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1337                         min_ity,
1338                         typeck_ity
1339                     );
1340                     // However, it is fine to make the discriminant type as large as we like
1341                     // (as an optimisation) after this point; we'll just truncate the value we load in codegen.
1342                 }
1343
1344                 // Check to see if we should use a different type for the
1345                 // discriminant. We can safely use a type with the same size
1346                 // as the alignment of the first field of each variant.
1347                 // We increase the size of the discriminant to avoid LLVM copying
1348                 // padding when it doesn't need to; copying padding normally causes
1349                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1350                 // bigger integer size, LLVM can be sure about its contents and
1351                 // won't be so conservative.
1352
1353                 // Use the initial field alignment
1354                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1355                     min_ity
1356                 } else {
1357                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1358                 };
1359
1360                 // If the alignment-derived integer is not larger than the minimally
1361                 // required discriminant size, just keep the minimal size.
1362                 if ity <= min_ity {
1363                     ity = min_ity;
1364                 } else {
1365                     // Patch up the variants' first few fields.
1366                     let old_ity_size = min_ity.size();
1367                     let new_ity_size = ity.size();
1368                     for variant in &mut layout_variants {
1369                         match variant.fields {
1370                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1371                                 for i in offsets {
1372                                     if *i <= old_ity_size {
1373                                         assert_eq!(*i, old_ity_size);
1374                                         *i = new_ity_size;
1375                                     }
1376                                 }
1377                                 // We might be making the struct larger.
1378                                 if variant.size <= old_ity_size {
1379                                     variant.size = new_ity_size;
1380                                 }
1381                             }
1382                             _ => bug!(),
1383                         }
1384                     }
1385                 }
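                      // Example of the widening above (illustrative): `enum E { A(u32), B }`
                      // needs only a 1-byte tag, but `A`'s field is 4-byte aligned, so the
                      // tag is widened to 4 bytes and what would have been 3 padding bytes
                      // become defined tag bytes instead.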
1386
1387                 let tag_mask = ity.size().unsigned_int_max();
1388                 let tag = Scalar::Initialized {
1389                     value: Int(ity, signed),
1390                     valid_range: WrappingRange {
1391                         start: (min as u128 & tag_mask),
1392                         end: (max as u128 & tag_mask),
1393                     },
1394                 };
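                      // Note that the masked range may wrap: e.g. `enum E { A = -1, B = 0 }`
                      // with a 1-byte signed tag gets `valid_range` `0xFF..=0x00`, a wrapping
                      // range containing exactly the two values 255 and 0.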
1395                 let mut abi = Abi::Aggregate { sized: true };
1396
1397                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1398                     abi = Abi::Uninhabited;
1399                 } else if tag.size(dl) == size {
1400                     // Make sure we only use scalar layout when the enum is entirely its
1401                     // own tag (i.e. it has no padding nor any non-ZST variant fields).
1402                     abi = Abi::Scalar(tag);
1403                 } else {
1404                     // Try to use a ScalarPair for all tagged enums.
1405                     let mut common_prim = None;
1406                     let mut common_prim_initialized_in_all_variants = true;
1407                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1408                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1409                             bug!();
1410                         };
1411                         let mut fields =
1412                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1413                         let (field, offset) = match (fields.next(), fields.next()) {
1414                             (None, None) => {
1415                                 common_prim_initialized_in_all_variants = false;
1416                                 continue;
1417                             }
1418                             (Some(pair), None) => pair,
1419                             _ => {
1420                                 common_prim = None;
1421                                 break;
1422                             }
1423                         };
1424                         let prim = match field.abi {
1425                             Abi::Scalar(scalar) => {
1426                                 common_prim_initialized_in_all_variants &=
1427                                     matches!(scalar, Scalar::Initialized { .. });
1428                                 scalar.primitive()
1429                             }
1430                             _ => {
1431                                 common_prim = None;
1432                                 break;
1433                             }
1434                         };
1435                         if let Some(pair) = common_prim {
1436                             // This is pretty conservative. We could go fancier
1437                             // by conflating things like i32 and u32, or even
1438                             // realising that (u8, u8) could just cohabit with
1439                             // u16 or even u32.
1440                             if pair != (prim, offset) {
1441                                 common_prim = None;
1442                                 break;
1443                             }
1444                         } else {
1445                             common_prim = Some((prim, offset));
1446                         }
1447                     }
1448                     if let Some((prim, offset)) = common_prim {
1449                         let prim_scalar = if common_prim_initialized_in_all_variants {
1450                             scalar_unit(prim)
1451                         } else {
1452                             // Common prim might be uninit.
1453                             Scalar::Union { value: prim }
1454                         };
1455                         let pair = self.scalar_pair(tag, prim_scalar);
1456                         let pair_offsets = match pair.fields {
1457                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1458                                 assert_eq!(memory_index, &[0, 1]);
1459                                 offsets
1460                             }
1461                             _ => bug!(),
1462                         };
1463                         if pair_offsets[0] == Size::ZERO
1464                             && pair_offsets[1] == *offset
1465                             && align == pair.align
1466                             && size == pair.size
1467                         {
1468                             // We can use `ScalarPair` only when it matches our
1469                             // already computed layout (including `#[repr(C)]`).
1470                             abi = pair.abi;
1471                         }
1472                     }
1473                 }
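                      // Illustrative outcome: for `enum E { A(u64), B(u64) }`, every inhabited
                      // variant stores a single `u64` at the same offset, so the enum as a
                      // whole gets `Abi::ScalarPair(tag, u64)` and can typically be passed in
                      // two registers rather than through memory.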
1474
1475                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1476                 // variants to ensure they are consistent. This is because a downcast is
1477                 // semantically a NOP, and thus should not affect layout.
1478                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1479                     for variant in &mut layout_variants {
1480                         // We only do this for variants with fields; the others are not accessed anyway.
1481                         // Also do not overwrite any already existing "clever" ABIs.
1482                         if variant.fields.count() > 0
1483                             && matches!(variant.abi, Abi::Aggregate { .. })
1484                         {
1485                             variant.abi = abi;
1486                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1487                             variant.size = cmp::max(variant.size, size);
1488                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1489                         }
1490                     }
1491                 }
1492
1493                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1494
1495                 let tagged_layout = LayoutS {
1496                     variants: Variants::Multiple {
1497                         tag,
1498                         tag_encoding: TagEncoding::Direct,
1499                         tag_field: 0,
1500                         variants: IndexVec::new(),
1501                     },
1502                     fields: FieldsShape::Arbitrary {
1503                         offsets: vec![Size::ZERO],
1504                         memory_index: vec![0],
1505                     },
1506                     largest_niche,
1507                     abi,
1508                     align,
1509                     size,
1510                 };
1511
1512                 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1513
1514                 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1515                     (tl, Some(nl)) => {
1516                         // Pick the smaller layout; otherwise,
1517                         // pick the layout with the larger niche; otherwise,
1518                         // pick tagged as it has simpler codegen.
1519                         use Ordering::*;
1520                         let niche_size = |tmp_l: &TmpLayout<'_>| {
1521                             tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1522                         };
1523                         match (
1524                             tl.layout.size.cmp(&nl.layout.size),
1525                             niche_size(&tl).cmp(&niche_size(&nl)),
1526                         ) {
1527                             (Greater, _) => nl,
1528                             (Equal, Less) => nl,
1529                             _ => tl,
1530                         }
1531                     }
1532                     (tl, None) => tl,
1533                 };
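                      // E.g. for `Option<&u8>`, the niche-filling candidate (8 bytes, `None`
                      // encoded as the null pointer) beats the tagged candidate (16 bytes
                      // after aligning the payload), so the `(Greater, _)` arm picks `nl`.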
1534
1535                 // Now we can intern the variant layouts and store them in the enum layout.
1536                 best_layout.layout.variants = match best_layout.layout.variants {
1537                     Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1538                         tag,
1539                         tag_encoding,
1540                         tag_field,
1541                         variants: best_layout
1542                             .variants
1543                             .into_iter()
1544                             .map(|layout| tcx.intern_layout(layout))
1545                             .collect(),
1546                     },
1547                     _ => bug!(),
1548                 };
1549
1550                 tcx.intern_layout(best_layout.layout)
1551             }
1552
1553             // Types with no meaningful known layout.
1554             ty::Projection(_) | ty::Opaque(..) => {
1555                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1556                 // if that was possible, so there's no reason to try again here.
1557                 return Err(LayoutError::Unknown(ty));
1558             }
1559
1560             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1561                 bug!("Layout::compute: unexpected type `{}`", ty)
1562             }
1563
1564             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1565                 return Err(LayoutError::Unknown(ty));
1566             }
1567         })
1568     }
1569 }
1570
1571 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1572 #[derive(Clone, Debug, PartialEq)]
1573 enum SavedLocalEligibility {
1574     Unassigned,
1575     Assigned(VariantIdx),
1576     // FIXME: Use newtype_index so we aren't wasting bytes
1577     Ineligible(Option<u32>),
1578 }
1579
1580 // When laying out generators, we divide our saved local fields into two
1581 // categories: overlap-eligible and overlap-ineligible.
1582 //
1583 // Those fields which are ineligible for overlap go in a "prefix" at the
1584 // beginning of the layout, and always have space reserved for them.
1585 //
1586 // Overlap-eligible fields are only assigned to one variant, so we lay
1587 // those fields out for each variant and put them right after the
1588 // prefix.
1589 //
1590 // Finally, in the layout details, we point to the fields from the
1591 // variants they are assigned to. It is possible for some fields to be
1592 // included in multiple variants. No field ever "moves around" in the
1593 // layout; its offset is always the same.
1594 //
1595 // Also included in the layout are the upvars and the discriminant.
1596 // These are included as fields on the "outer" layout; they are not part
1597 // of any variant.
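     //
     // For intuition, a sketch (hypothetical source, not a test from this file):
     //
     //     async { let a = [0u8; 1024]; x().await; drop(a);
     //             let b = [0u8; 1024]; y().await; drop(b); }
     //
     // Here `a` is live only across the first await and `b` only across the
     // second, and they are never live at once, so each is assigned to a single
     // variant and the two share offsets instead of costing 2048 bytes total.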
1598 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1599     /// Compute the eligibility and assignment of each local.
1600     fn generator_saved_local_eligibility(
1601         &self,
1602         info: &GeneratorLayout<'tcx>,
1603     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1604         use SavedLocalEligibility::*;
1605
1606         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1607             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1608
1609         // The saved locals not eligible for overlap. These will get
1610         // "promoted" to the prefix of our generator.
1611         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1612
1613         // Figure out which of our saved locals are fields in only
1614         // one variant. The rest are deemed ineligible for overlap.
1615         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1616             for local in fields {
1617                 match assignments[*local] {
1618                     Unassigned => {
1619                         assignments[*local] = Assigned(variant_index);
1620                     }
1621                     Assigned(idx) => {
1622                         // We've already seen this local at another suspension
1623                         // point, so it is no longer a candidate.
1624                         trace!(
1625                             "removing local {:?} in >1 variant ({:?}, {:?})",
1626                             local,
1627                             variant_index,
1628                             idx
1629                         );
1630                         ineligible_locals.insert(*local);
1631                         assignments[*local] = Ineligible(None);
1632                     }
1633                     Ineligible(_) => {}
1634                 }
1635             }
1636         }
1637
1638         // Next, check every pair of eligible locals to see if they
1639         // conflict.
1640         for local_a in info.storage_conflicts.rows() {
1641             let conflicts_a = info.storage_conflicts.count(local_a);
1642             if ineligible_locals.contains(local_a) {
1643                 continue;
1644             }
1645
1646             for local_b in info.storage_conflicts.iter(local_a) {
1647                 // local_a and local_b are storage live at the same time, therefore they
1648                 // cannot overlap in the generator layout. The only way to guarantee
1649                 // this is if they are in the same variant, or one is ineligible
1650                 // (which means it is stored in every variant).
1651                 if ineligible_locals.contains(local_b)
1652                     || assignments[local_a] == assignments[local_b]
1653                 {
1654                     continue;
1655                 }
1656
1657                 // If they conflict, we will choose one to make ineligible.
1658                 // This is not always optimal; it's just a greedy heuristic that
1659                 // seems to produce good results most of the time.
1660                 let conflicts_b = info.storage_conflicts.count(local_b);
1661                 let (remove, other) =
1662                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1663                 ineligible_locals.insert(remove);
1664                 assignments[remove] = Ineligible(None);
1665                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1666             }
1667         }
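              // E.g. if `local_a` conflicts with four other locals while `local_b`
              // conflicts only with `local_a`, we evict `local_a` to the prefix,
              // leaving `local_b` and the other four potentially free to overlap
              // one another.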
1668
1669         // Count the number of variants in use. If only one of them, then it is
1670         // impossible to overlap any locals in our layout. In this case it's
1671         // always better to make the remaining locals ineligible, so we can
1672         // lay them out with the other locals in the prefix and eliminate
1673         // unnecessary padding bytes.
1674         {
1675             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1676             for assignment in &assignments {
1677                 if let Assigned(idx) = assignment {
1678                     used_variants.insert(*idx);
1679                 }
1680             }
1681             if used_variants.count() < 2 {
1682                 for assignment in assignments.iter_mut() {
1683                     *assignment = Ineligible(None);
1684                 }
1685                 ineligible_locals.insert_all();
1686             }
1687         }
1688
1689         // Write down the order of our locals that will be promoted to the prefix.
1690         {
1691             for (idx, local) in ineligible_locals.iter().enumerate() {
1692                 assignments[local] = Ineligible(Some(idx as u32));
1693             }
1694         }
1695         debug!("generator saved local assignments: {:?}", assignments);
1696
1697         (ineligible_locals, assignments)
1698     }
1699
1700     /// Compute the full generator layout.
1701     fn generator_layout(
1702         &self,
1703         ty: Ty<'tcx>,
1704         def_id: hir::def_id::DefId,
1705         substs: SubstsRef<'tcx>,
1706     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1707         use SavedLocalEligibility::*;
1708         let tcx = self.tcx;
1709         let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1710
1711         let Some(info) = tcx.generator_layout(def_id) else {
1712             return Err(LayoutError::Unknown(ty));
1713         };
1714         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1715
1716         // Build a prefix layout, including "promoting" all ineligible
1717         // locals as part of the prefix. We compute the layout of all of
1718         // these fields at once to get optimal packing.
1719         let tag_index = substs.as_generator().prefix_tys().count();
1720
1721         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1722         let max_discr = (info.variant_fields.len() - 1) as u128;
1723         let discr_int = Integer::fit_unsigned(max_discr);
1724         let discr_int_ty = discr_int.to_ty(tcx, false);
1725         let tag = Scalar::Initialized {
1726             value: Primitive::Int(discr_int, false),
1727             valid_range: WrappingRange { start: 0, end: max_discr },
1728         };
1729         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1730         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
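              // E.g. a generator with three suspension points has the three reserved
              // variants plus one variant per suspension point, so `max_discr` is 5
              // and the tag is an unsigned byte valid in `0..=5`.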
1731
1732         let promoted_layouts = ineligible_locals
1733             .iter()
1734             .map(|local| subst_field(info.field_tys[local]))
1735             .map(|ty| tcx.mk_maybe_uninit(ty))
1736             .map(|ty| self.layout_of(ty));
1737         let prefix_layouts = substs
1738             .as_generator()
1739             .prefix_tys()
1740             .map(|ty| self.layout_of(ty))
1741             .chain(iter::once(Ok(tag_layout)))
1742             .chain(promoted_layouts)
1743             .collect::<Result<Vec<_>, _>>()?;
1744         let prefix = self.univariant_uninterned(
1745             ty,
1746             &prefix_layouts,
1747             &ReprOptions::default(),
1748             StructKind::AlwaysSized,
1749         )?;
1750
1751         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1752
1753         // Split the prefix layout into the "outer" fields (upvars and
1754         // discriminant) and the "promoted" fields. Promoted fields will
1755         // get included in each variant that requested them in
1756         // GeneratorLayout.
1757         debug!("prefix = {:#?}", prefix);
1758         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1759             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1760                 let mut inverse_memory_index = invert_mapping(&memory_index);
1761
1762                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1763                 // "outer" and "promoted" fields respectively.
1764                 let b_start = (tag_index + 1) as u32;
1765                 let offsets_b = offsets.split_off(b_start as usize);
1766                 let offsets_a = offsets;
1767
1768                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1769                 // by preserving the order but keeping only one disjoint "half" each.
1770                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1771                 let inverse_memory_index_b: Vec<_> =
1772                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1773                 inverse_memory_index.retain(|&i| i < b_start);
1774                 let inverse_memory_index_a = inverse_memory_index;
1775
1776                 // Since `inverse_memory_index_{a,b}` each only refer to their
1777                 // respective fields, they can be safely inverted.
1778                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1779                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1780
1781                 let outer_fields =
1782                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1783                 (outer_fields, offsets_b, memory_index_b)
1784             }
1785             _ => bug!(),
1786         };
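              // Worked example of the disentangling above (hypothetical numbers):
              // with `memory_index = [2, 0, 3, 1]` and `b_start = 2`, the inverse is
              // `[1, 3, 0, 2]`; the filtering steps yield `inverse_memory_index_a ==
              // [1, 0]` and `inverse_memory_index_b == [1, 0]`, which invert back to
              // `memory_index_a == [1, 0]` and `memory_index_b == [1, 0]`.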
1787
1788         let mut size = prefix.size;
1789         let mut align = prefix.align;
1790         let variants = info
1791             .variant_fields
1792             .iter_enumerated()
1793             .map(|(index, variant_fields)| {
1794                 // Only include overlap-eligible fields when we compute our variant layout.
1795                 let variant_only_tys = variant_fields
1796                     .iter()
1797                     .filter(|local| match assignments[**local] {
1798                         Unassigned => bug!(),
1799                         Assigned(v) if v == index => true,
1800                         Assigned(_) => bug!("assignment does not match variant"),
1801                         Ineligible(_) => false,
1802                     })
1803                     .map(|local| subst_field(info.field_tys[*local]));
1804
1805                 let mut variant = self.univariant_uninterned(
1806                     ty,
1807                     &variant_only_tys
1808                         .map(|ty| self.layout_of(ty))
1809                         .collect::<Result<Vec<_>, _>>()?,
1810                     &ReprOptions::default(),
1811                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1812                 )?;
1813                 variant.variants = Variants::Single { index };
1814
1815                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1816                     bug!();
1817                 };
1818
1819                 // Now, stitch the promoted and variant-only fields back together in
1820                 // the order they are mentioned by our GeneratorLayout.
1821                 // Because we only use some subset (that can differ between variants)
1822                 // of the promoted fields, we can't just pick those elements of the
1823                 // `promoted_memory_index` (as we'd end up with gaps).
1824                 // So instead, we build an "inverse memory_index", as if all of the
1825                 // promoted fields were being used, but leave the elements not in the
1826                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1827                 // obtain a valid (bijective) mapping.
1828                 const INVALID_FIELD_IDX: u32 = !0;
1829                 let mut combined_inverse_memory_index =
1830                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1831                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1832                 let combined_offsets = variant_fields
1833                     .iter()
1834                     .enumerate()
1835                     .map(|(i, local)| {
1836                         let (offset, memory_index) = match assignments[*local] {
1837                             Unassigned => bug!(),
1838                             Assigned(_) => {
1839                                 let (offset, memory_index) =
1840                                     offsets_and_memory_index.next().unwrap();
1841                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1842                             }
1843                             Ineligible(field_idx) => {
1844                                 let field_idx = field_idx.unwrap() as usize;
1845                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1846                             }
1847                         };
1848                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1849                         offset
1850                     })
1851                     .collect();
1852
1853                 // Remove the unused slots and invert the mapping to obtain the
1854                 // combined `memory_index` (also see previous comment).
1855                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1856                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1857
1858                 variant.fields = FieldsShape::Arbitrary {
1859                     offsets: combined_offsets,
1860                     memory_index: combined_memory_index,
1861                 };
1862
1863                 size = size.max(variant.size);
1864                 align = align.max(variant.align);
1865                 Ok(tcx.intern_layout(variant))
1866             })
1867             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1868
1869         size = size.align_to(align.abi);
1870
1871         let abi =
1872             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1873                 Abi::Uninhabited
1874             } else {
1875                 Abi::Aggregate { sized: true }
1876             };
1877
1878         let layout = tcx.intern_layout(LayoutS {
1879             variants: Variants::Multiple {
1880                 tag,
1881                 tag_encoding: TagEncoding::Direct,
1882                 tag_field: tag_index,
1883                 variants,
1884             },
1885             fields: outer_fields,
1886             abi,
1887             largest_niche: prefix.largest_niche,
1888             size,
1889             align,
1890         });
1891         debug!("generator layout ({:?}): {:#?}", ty, layout);
1892         Ok(layout)
1893     }
1894
1895     /// This is invoked by the `layout_of` query to record the final
1896     /// layout of each type.
1897     #[inline(always)]
1898     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1899         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1900         // for dumping later.
1901         if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1902             self.record_layout_for_printing_outlined(layout)
1903         }
1904     }
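          // The recorded data is later printed by `-Zprint-type-sizes` roughly like
          // this (abbreviated; the exact format lives in the session's `CodeStats`):
          //
          //     print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes
          //     print-type-size     discriminant: 4 bytes
          //     print-type-size     variant `Some`: 4 bytes
          //     print-type-size         field `.0`: 4 bytes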
1905
1906     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1907         // Ignore layouts computed with non-empty parameter environments, and
1908         // non-monomorphic layouts, as the user only wants to see the layouts
1909         // resulting from the final codegen session.
1910         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1911             return;
1912         }
1913
1914         // (delay format until we actually need it)
1915         let record = |kind, packed, opt_discr_size, variants| {
1916             let type_desc = format!("{:?}", layout.ty);
1917             self.tcx.sess.code_stats.record_type_size(
1918                 kind,
1919                 type_desc,
1920                 layout.align.abi,
1921                 layout.size,
1922                 packed,
1923                 opt_discr_size,
1924                 variants,
1925             );
1926         };
1927
1928         let adt_def = match *layout.ty.kind() {
1929             ty::Adt(ref adt_def, _) => {
1930                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1931                 adt_def
1932             }
1933
1934             ty::Closure(..) => {
1935                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1936                 record(DataTypeKind::Closure, false, None, vec![]);
1937                 return;
1938             }
1939
1940             _ => {
1941                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1942                 return;
1943             }
1944         };
1945
1946         let adt_kind = adt_def.adt_kind();
1947         let adt_packed = adt_def.repr().pack.is_some();
1948
1949         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1950             let mut min_size = Size::ZERO;
1951             let field_info: Vec<_> = flds
1952                 .iter()
1953                 .enumerate()
1954                 .map(|(i, &name)| {
1955                     let field_layout = layout.field(self, i);
1956                     let offset = layout.fields.offset(i);
1957                     let field_end = offset + field_layout.size;
1958                     if min_size < field_end {
1959                         min_size = field_end;
1960                     }
1961                     FieldInfo {
1962                         name,
1963                         offset: offset.bytes(),
1964                         size: field_layout.size.bytes(),
1965                         align: field_layout.align.abi.bytes(),
1966                     }
1967                 })
1968                 .collect();
1969
1970             VariantInfo {
1971                 name: n,
1972                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1973                 align: layout.align.abi.bytes(),
1974                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1975                 fields: field_info,
1976             }
1977         };
1978
1979         match layout.variants {
1980             Variants::Single { index } => {
1981                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1982                     debug!(
1983                         "print-type-size `{:#?}` variant {}",
1984                         layout,
1985                         adt_def.variant(index).name
1986                     );
1987                     let variant_def = &adt_def.variant(index);
1988                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1989                     record(
1990                         adt_kind.into(),
1991                         adt_packed,
1992                         None,
1993                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1994                     );
1995                 } else {
1996                     // (This case arises for *empty* enums, so give it
1997                     // zero variants.)
1998                     record(adt_kind.into(), adt_packed, None, vec![]);
1999                 }
2000             }
2001
2002             Variants::Multiple { tag, ref tag_encoding, .. } => {
2003                 debug!(
2004                     "print-type-size `{:#?}` adt general variants def {}",
2005                     layout.ty,
2006                     adt_def.variants().len()
2007                 );
2008                 let variant_infos: Vec<_> = adt_def
2009                     .variants()
2010                     .iter_enumerated()
2011                     .map(|(i, variant_def)| {
2012                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2013                         build_variant_info(
2014                             Some(variant_def.name),
2015                             &fields,
2016                             layout.for_variant(self, i),
2017                         )
2018                     })
2019                     .collect();
2020                 record(
2021                     adt_kind.into(),
2022                     adt_packed,
2023                     match tag_encoding {
2024                         TagEncoding::Direct => Some(tag.size(self)),
2025                         _ => None,
2026                     },
2027                     variant_infos,
2028                 );
2029             }
2030         }
2031     }
2032 }
2033
2034 /// Type size "skeleton", i.e., the only information determining a type's size.
2035 /// While this is conservative (aside from constant sizes, only pointers,
2036 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
2037 /// enough to statically check common use cases of transmute.
2038 #[derive(Copy, Clone, Debug)]
2039 pub enum SizeSkeleton<'tcx> {
2040     /// Any statically computable Layout.
2041     Known(Size),
2042
2043     /// A potentially-fat pointer.
2044     Pointer {
2045         /// If true, this pointer is never null.
2046         non_zero: bool,
2047         /// The type which determines the unsized metadata, if any,
2048         /// of this pointer. Either a type parameter or a projection
2049         /// depending on one, with regions erased.
2050         tail: Ty<'tcx>,
2051     },
2052 }
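 // Example of what this enables (a sketch): in generic code,
 // `mem::transmute::<&T, *const T>` cannot be checked via `layout_of` because
 // `T` has no concrete layout, but both sides reduce to
 // `SizeSkeleton::Pointer { tail: T, .. }`, so `same_size` below can still
 // accept the transmute.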
2053
2054 impl<'tcx> SizeSkeleton<'tcx> {
2055     pub fn compute(
2056         ty: Ty<'tcx>,
2057         tcx: TyCtxt<'tcx>,
2058         param_env: ty::ParamEnv<'tcx>,
2059     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2060         debug_assert!(!ty.has_infer_types_or_consts());
2061
2062         // First try computing a static layout.
2063         let err = match tcx.layout_of(param_env.and(ty)) {
2064             Ok(layout) => {
2065                 return Ok(SizeSkeleton::Known(layout.size));
2066             }
2067             Err(err) => err,
2068         };
2069
2070         match *ty.kind() {
2071             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2072                 let non_zero = !ty.is_unsafe_ptr();
2073                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2074                 match tail.kind() {
2075                     ty::Param(_) | ty::Projection(_) => {
2076                         debug_assert!(tail.has_param_types_or_consts());
2077                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2078                     }
2079                     _ => bug!(
2080                         "SizeSkeleton::compute({}): layout errored ({}), yet \
2081                               tail `{}` is not a type parameter or a projection",
2082                         ty,
2083                         err,
2084                         tail
2085                     ),
2086                 }
2087             }
2088
2089             ty::Adt(def, substs) => {
2090                 // Only newtypes and enums w/ nullable pointer optimization.
2091                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2092                     return Err(err);
2093                 }
2094
2095                 // Get a zero-sized variant or a pointer newtype.
2096                 let zero_or_ptr_variant = |i| {
2097                     let i = VariantIdx::new(i);
2098                     let fields =
2099                         def.variant(i).fields.iter().map(|field| {
2100                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2101                         });
2102                     let mut ptr = None;
2103                     for field in fields {
2104                         let field = field?;
2105                         match field {
2106                             SizeSkeleton::Known(size) => {
2107                                 if size.bytes() > 0 {
2108                                     return Err(err);
2109                                 }
2110                             }
2111                             SizeSkeleton::Pointer { .. } => {
2112                                 if ptr.is_some() {
2113                                     return Err(err);
2114                                 }
2115                                 ptr = Some(field);
2116                             }
2117                         }
2118                     }
2119                     Ok(ptr)
2120                 };
2121
2122                 let v0 = zero_or_ptr_variant(0)?;
2123                 // Newtype.
2124                 if def.variants().len() == 1 {
2125                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2126                         return Ok(SizeSkeleton::Pointer {
2127                             non_zero: non_zero
2128                                 || match tcx.layout_scalar_valid_range(def.did()) {
2129                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2130                                     (Bound::Included(start), Bound::Included(end)) => {
2131                                         0 < start && start < end
2132                                     }
2133                                     _ => false,
2134                                 },
2135                             tail,
2136                         });
2137                     } else {
2138                         return Err(err);
2139                     }
2140                 }
2141
2142                 let v1 = zero_or_ptr_variant(1)?;
2143                 // Nullable pointer enum optimization.
2144                 match (v0, v1) {
2145                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2146                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2147                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2148                     }
2149                     _ => Err(err),
2150                 }
2151             }
2152
2153             ty::Projection(_) | ty::Opaque(..) => {
2154                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2155                 if ty == normalized {
2156                     Err(err)
2157                 } else {
2158                     SizeSkeleton::compute(normalized, tcx, param_env)
2159                 }
2160             }
2161
2162             _ => Err(err),
2163         }
2164     }
2165
2166     pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2167         match (self, other) {
2168             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2169             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2170                 a == b
2171             }
2172             _ => false,
2173         }
2174     }
2175 }
2176
2177 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2178     fn tcx(&self) -> TyCtxt<'tcx>;
2179 }
2180
2181 pub trait HasParamEnv<'tcx> {
2182     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2183 }
2184
2185 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2186     #[inline]
2187     fn data_layout(&self) -> &TargetDataLayout {
2188         &self.data_layout
2189     }
2190 }
2191
2192 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2193     fn target_spec(&self) -> &Target {
2194         &self.sess.target
2195     }
2196 }
2197
2198 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2199     #[inline]
2200     fn tcx(&self) -> TyCtxt<'tcx> {
2201         *self
2202     }
2203 }
2204
2205 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2206     #[inline]
2207     fn data_layout(&self) -> &TargetDataLayout {
2208         &self.data_layout
2209     }
2210 }
2211
2212 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2213     fn target_spec(&self) -> &Target {
2214         &self.sess.target
2215     }
2216 }
2217
2218 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2219     #[inline]
2220     fn tcx(&self) -> TyCtxt<'tcx> {
2221         **self
2222     }
2223 }
2224
2225 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2226     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2227         self.param_env
2228     }
2229 }
2230
2231 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2232     fn data_layout(&self) -> &TargetDataLayout {
2233         self.tcx.data_layout()
2234     }
2235 }
2236
2237 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2238     fn target_spec(&self) -> &Target {
2239         self.tcx.target_spec()
2240     }
2241 }
2242
2243 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2244     fn tcx(&self) -> TyCtxt<'tcx> {
2245         self.tcx.tcx()
2246     }
2247 }
2248
2249 pub trait MaybeResult<T> {
2250     type Error;
2251
2252     fn from(x: Result<T, Self::Error>) -> Self;
2253     fn to_result(self) -> Result<T, Self::Error>;
2254 }
2255
2256 impl<T> MaybeResult<T> for T {
2257     type Error = !;
2258
2259     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2260         x
2261     }
2262     fn to_result(self) -> Result<T, Self::Error> {
2263         Ok(self)
2264     }
2265 }
2266
2267 impl<T, E> MaybeResult<T> for Result<T, E> {
2268     type Error = E;
2269
2270     fn from(x: Result<T, Self::Error>) -> Self {
2271         x
2272     }
2273     fn to_result(self) -> Result<T, Self::Error> {
2274         self
2275     }
2276 }
2277
2278 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2279
2280 /// Trait for contexts that want to be able to compute layouts of types.
2281 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2282 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2283     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2284     /// returned from `layout_of` (see also `handle_layout_err`).
2285     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2286
2287     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2288     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2289     #[inline]
2290     fn layout_tcx_at_span(&self) -> Span {
2291         DUMMY_SP
2292     }
2293
2294     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2295     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2296     ///
2297     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2298     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2299     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2300     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2301     fn handle_layout_err(
2302         &self,
2303         err: LayoutError<'tcx>,
2304         span: Span,
2305         ty: Ty<'tcx>,
2306     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2307 }
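 // For example, the `LayoutCx` impls further down keep the plain
 // `Result<TyAndLayout, LayoutError>` and simply forward the error from
 // `handle_layout_err`, while a codegen context can instead choose
 // `LayoutOfResult = TyAndLayout<'tcx>` and turn layout errors into fatal
 // errors inside its `handle_layout_err`.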
2308
2309 /// Blanket extension trait for contexts that can compute layouts of types.
2310 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2311     /// Computes the layout of a type. Note that this implicitly
2312     /// executes in "reveal all" mode, and will normalize the input type.
2313     #[inline]
2314     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2315         self.spanned_layout_of(ty, DUMMY_SP)
2316     }
2317
2318     /// Computes the layout of a type, at `span`. Note that this implicitly
2319     /// executes in "reveal all" mode, and will normalize the input type.
2320     // FIXME(eddyb) avoid passing information like this, and instead add more
2321     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2322     #[inline]
2323     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2324         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2325         let tcx = self.tcx().at(span);
2326
2327         MaybeResult::from(
2328             tcx.layout_of(self.param_env().and(ty))
2329                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2330         )
2331     }
2332 }
2333
2334 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
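 // Usage sketch (assuming a `cx: LayoutCx<'tcx, TyCtxt<'tcx>>` built
 // elsewhere, on a typical 64-bit target):
 //
 //     let layout = cx.layout_of(cx.tcx.types.u32)?;
 //     assert_eq!(layout.size.bytes(), 4);
 //     assert_eq!(layout.align.abi.bytes(), 4);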
2335
2336 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2337     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2338
2339     #[inline]
2340     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2341         err
2342     }
2343 }
2344
2345 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2346     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2347
2348     #[inline]
2349     fn layout_tcx_at_span(&self) -> Span {
2350         self.tcx.span
2351     }
2352
2353     #[inline]
2354     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2355         err
2356     }
2357 }
2358
2359 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2360 where
2361     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2362 {
2363     fn ty_and_layout_for_variant(
2364         this: TyAndLayout<'tcx>,
2365         cx: &C,
2366         variant_index: VariantIdx,
2367     ) -> TyAndLayout<'tcx> {
2368         let layout = match this.variants {
2369             Variants::Single { index }
2370                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2371                 if index == variant_index &&
2372                 // Don't confuse variants of uninhabited enums with the enum itself.
2373                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2374                 this.fields != FieldsShape::Primitive =>
2375             {
2376                 this.layout
2377             }
2378
2379             Variants::Single { index } => {
2380                 let tcx = cx.tcx();
2381                 let param_env = cx.param_env();
2382
2383                 // Deny calling for_variant more than once for non-Single enums.
2384                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2385                     assert_eq!(original_layout.variants, Variants::Single { index });
2386                 }
2387
2388                 let fields = match this.ty.kind() {
2389                     ty::Adt(def, _) if def.variants().is_empty() =>
2390                         bug!("for_variant called on zero-variant enum"),
2391                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2392                     _ => bug!(),
2393                 };
2394                 tcx.intern_layout(LayoutS {
2395                     variants: Variants::Single { index: variant_index },
2396                     fields: match NonZeroUsize::new(fields) {
2397                         Some(fields) => FieldsShape::Union(fields),
2398                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2399                     },
2400                     abi: Abi::Uninhabited,
2401                     largest_niche: None,
2402                     align: tcx.data_layout.i8_align,
2403                     size: Size::ZERO,
2404                 })
2405             }
2406
2407             Variants::Multiple { ref variants, .. } => variants[variant_index],
2408         };
2409
2410         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2411
2412         TyAndLayout { ty: this.ty, layout }
2413     }
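    // Illustrative sketch (hypothetical caller): downcasting an enum layout to a
    // variant always yields `Variants::Single` for the requested index, e.g. for
    // the `Some` variant of an `Option`:
    //
    //     let some = layout.for_variant(&cx, VariantIdx::new(1));
    //     assert_eq!(*some.variants(), Variants::Single { index: VariantIdx::new(1) });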
2414
2415     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2416         enum TyMaybeWithLayout<'tcx> {
2417             Ty(Ty<'tcx>),
2418             TyAndLayout(TyAndLayout<'tcx>),
2419         }
2420
2421         fn field_ty_or_layout<'tcx>(
2422             this: TyAndLayout<'tcx>,
2423             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2424             i: usize,
2425         ) -> TyMaybeWithLayout<'tcx> {
2426             let tcx = cx.tcx();
2427             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2428                 TyAndLayout {
2429                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2430                     ty: tag.primitive().to_ty(tcx),
2431                 }
2432             };
2433
2434             match *this.ty.kind() {
2435                 ty::Bool
2436                 | ty::Char
2437                 | ty::Int(_)
2438                 | ty::Uint(_)
2439                 | ty::Float(_)
2440                 | ty::FnPtr(_)
2441                 | ty::Never
2442                 | ty::FnDef(..)
2443                 | ty::GeneratorWitness(..)
2444                 | ty::Foreign(..)
2445                 | ty::Dynamic(_, _, ty::Dyn) => {
2446                     bug!("TyAndLayout::field({:?}): not applicable", this)
2447                 }
2448
2449                 // Potentially-fat pointers.
2450                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2451                     assert!(i < this.fields.count());
2452
2453                     // Reuse the fat `*T` type as its own thin pointer data field.
2454                     // This provides information about, e.g., DST struct pointees
2455                     // (which may have no non-DST form), and will work as long
2456                     // as the `Abi` or `FieldsShape` is checked by users.
2457                     if i == 0 {
2458                         let nil = tcx.mk_unit();
2459                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2460                             tcx.mk_mut_ptr(nil)
2461                         } else {
2462                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2463                         };
2464
2465                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2466                         // the `Result` should always work because the type is
2467                         // always either `*mut ()` or `&'static mut ()`.
2468                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2469                             ty: this.ty,
2470                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2471                         });
2472                     }
2473
2474                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2475                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2476                         ty::Dynamic(_, _, ty::Dyn) => {
2477                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2478                                 tcx.lifetimes.re_static,
2479                                 tcx.mk_array(tcx.types.usize, 3),
2480                             ))
2481                             /* FIXME: use actual fn pointers
2482                             Warning: naively computing the number of entries in the
2483                             vtable by counting the methods on the trait + methods on
2484                             all parent traits does not work, because some methods can
2485                             be not object safe and thus excluded from the vtable.
2486                             Increase this counter if you tried to implement this but
2487                             failed to do it without duplicating a lot of code from
2488                             other places in the compiler: 2
2489                             tcx.mk_tup(&[
2490                                 tcx.mk_array(tcx.types.usize, 3),
2491                                 tcx.mk_array(Option<fn()>),
2492                             ])
2493                             */
2494                         }
2495                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2496                     }
2497                 }
2498
2499                 // Arrays and slices.
2500                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2501                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2502
2503                 // Tuples, generators and closures.
2504                 ty::Closure(_, ref substs) => field_ty_or_layout(
2505                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2506                     cx,
2507                     i,
2508                 ),
2509
2510                 ty::Generator(def_id, ref substs, _) => match this.variants {
2511                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2512                         substs
2513                             .as_generator()
2514                             .state_tys(def_id, tcx)
2515                             .nth(index.as_usize())
2516                             .unwrap()
2517                             .nth(i)
2518                             .unwrap(),
2519                     ),
2520                     Variants::Multiple { tag, tag_field, .. } => {
2521                         if i == tag_field {
2522                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2523                         }
2524                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2525                     }
2526                 },
2527
2528                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2529
2530                 // ADTs.
2531                 ty::Adt(def, substs) => {
2532                     match this.variants {
2533                         Variants::Single { index } => {
2534                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2535                         }
2536
2537                         // Discriminant field for enums (where applicable).
2538                         Variants::Multiple { tag, .. } => {
2539                             assert_eq!(i, 0);
2540                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2541                         }
2542                     }
2543                 }
2544
2545                 ty::Dynamic(_, _, ty::DynStar) => {
2546                     if i == 0 {
2547                         TyMaybeWithLayout::Ty(tcx.types.usize)
2548                     } else if i == 1 {
2549                         // FIXME(dyn-star) same FIXME as above applies here too
2550                         TyMaybeWithLayout::Ty(
2551                             tcx.mk_imm_ref(
2552                                 tcx.lifetimes.re_static,
2553                                 tcx.mk_array(tcx.types.usize, 3),
2554                             ),
2555                         )
2556                     } else {
2557                         bug!("no field {i} on dyn*")
2558                     }
2559                 }
2560
2561                 ty::Projection(_)
2562                 | ty::Bound(..)
2563                 | ty::Placeholder(..)
2564                 | ty::Opaque(..)
2565                 | ty::Param(_)
2566                 | ty::Infer(_)
2567                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2568             }
2569         }
2570
2571         match field_ty_or_layout(this, cx, i) {
2572             TyMaybeWithLayout::Ty(field_ty) => {
2573                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2574                     bug!(
2575                         "failed to get layout for `{}`: {},\n\
2576                          despite it being a field (#{}) of an existing layout: {:#?}",
2577                         field_ty,
2578                         e,
2579                         i,
2580                         this
2581                     )
2582                 })
2583             }
2584             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2585         }
2586     }
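    // Illustrative sketch (hypothetical caller): walking all fields of a layout
    // pairs this accessor with the offsets recorded in `FieldsShape`:
    //
    //     for i in 0..layout.fields.count() {
    //         let field = layout.field(&cx, i);
    //         debug!("field {} at {:?}: {:?}", i, layout.fields.offset(i), field.ty);
    //     }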
2587
2588     fn ty_and_layout_pointee_info_at(
2589         this: TyAndLayout<'tcx>,
2590         cx: &C,
2591         offset: Size,
2592     ) -> Option<PointeeInfo> {
2593         let tcx = cx.tcx();
2594         let param_env = cx.param_env();
2595
2596         let addr_space_of_ty = |ty: Ty<'tcx>| {
2597             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2598         };
2599
2600         let pointee_info = match *this.ty.kind() {
2601             ty::RawPtr(mt) if offset.bytes() == 0 => {
2602                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2603                     size: layout.size,
2604                     align: layout.align.abi,
2605                     safe: None,
2606                     address_space: addr_space_of_ty(mt.ty),
2607                 })
2608             }
2609             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2610                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2611                     size: layout.size,
2612                     align: layout.align.abi,
2613                     safe: None,
2614                     address_space: cx.data_layout().instruction_address_space,
2615                 })
2616             }
2617             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2618                 let address_space = addr_space_of_ty(ty);
2619                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2620                     // Use conservative pointer kind if not optimizing. This saves us the
2621                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2622                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2623                     PointerKind::SharedMutable
2624                 } else {
2625                     match mt {
2626                         hir::Mutability::Not => {
2627                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2628                                 PointerKind::Frozen
2629                             } else {
2630                                 PointerKind::SharedMutable
2631                             }
2632                         }
2633                         hir::Mutability::Mut => {
2634                             // References to self-referential structures should not be considered
2635                             // noalias, as another pointer to the structure can be obtained that
2636                             // is not based on the original reference. We consider all !Unpin
2637                             // types to be potentially self-referential here.
2638                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2639                                 PointerKind::UniqueBorrowed
2640                             } else {
2641                                 PointerKind::UniqueBorrowedPinned
2642                             }
2643                         }
2644                     }
2645                 };
2646
2647                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2648                     size: layout.size,
2649                     align: layout.align.abi,
2650                     safe: Some(kind),
2651                     address_space,
2652                 })
2653             }
2654
2655             _ => {
2656                 let mut data_variant = match this.variants {
2657                     // Within the discriminant field, only the niche itself is
2658                     // always initialized, so we only check for a pointer at its
2659                     // offset.
2660                     //
2661                     // If the niche is a pointer, it's either valid (according
2662                     // to its type), or null (which the niche field's scalar
2663                     // validity range encodes).  This allows using
2664                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2665                     // this will continue to work as long as we don't start
2666                     // using more niches than just null (e.g., the first page of
2667                     // the address space, or unaligned pointers).
2668                     Variants::Multiple {
2669                         tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2670                         tag_field,
2671                         ..
2672                     } if this.fields.offset(tag_field) == offset => {
2673                         Some(this.for_variant(cx, untagged_variant))
2674                     }
2675                     _ => Some(this),
2676                 };
2677
2678                 if let Some(variant) = data_variant {
2679                     // We're not interested in any unions.
2680                     if let FieldsShape::Union(_) = variant.fields {
2681                         data_variant = None;
2682                     }
2683                 }
2684
2685                 let mut result = None;
2686
2687                 if let Some(variant) = data_variant {
2688                     let ptr_end = offset + Pointer.size(cx);
2689                     for i in 0..variant.fields.count() {
2690                         let field_start = variant.fields.offset(i);
2691                         if field_start <= offset {
2692                             let field = variant.field(cx, i);
2693                             result = field.to_result().ok().and_then(|field| {
2694                                 if ptr_end <= field_start + field.size {
2695                                     // We found the right field, look inside it.
2696                                     field.pointee_info_at(cx, offset - field_start)
2699                                 } else {
2700                                     None
2701                                 }
2702                             });
2703                             if result.is_some() {
2704                                 break;
2705                             }
2706                         }
2707                     }
2708                 }
2709
2710                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2711                 if let Some(ref mut pointee) = result {
2712                     if let ty::Adt(def, _) = this.ty.kind() {
2713                         if def.is_box() && offset.bytes() == 0 {
2714                             pointee.safe = Some(PointerKind::UniqueOwned);
2715                         }
2716                     }
2717                 }
2718
2719                 result
2720             }
2721         };
2722
2723         debug!(
2724             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2725             offset,
2726             this.ty.kind(),
2727             pointee_info
2728         );
2729
2730         pointee_info
2731     }
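    // Illustrative sketch: for `Option<&u32>` at offset 0, the niche *is* the
    // reference, so the `Variants::Multiple` arm above selects the untagged
    // (`Some`) variant, and the field walk recurses into `&u32`:
    //
    //     let info = layout.pointee_info_at(&cx, Size::ZERO);
    //
    // yielding the size/align of `u32` and, when optimizing, a `safe` kind of
    // `PointerKind::Frozen` -- enough for codegen to emit `dereferenceable_or_null`.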
2732
2733     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2734         matches!(this.ty.kind(), ty::Adt(..))
2735     }
2736
2737     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2738         this.ty.kind() == &ty::Never
2739     }
2740
2741     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2742         matches!(this.ty.kind(), ty::Tuple(..))
2743     }
2744
2745     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2746         matches!(this.ty.kind(), ty::Tuple(list) if list.is_empty())
2747     }
2748 }
2749
2750 impl<'tcx> ty::Instance<'tcx> {
2751     // NOTE(eddyb) this is private to avoid using it from outside of
2752     // `fn_abi_of_instance` - any other uses are either too high-level
2753     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2754     // or should go through `FnAbi` instead, to avoid losing any
2755     // adjustments `fn_abi_of_instance` might be performing.
2756     #[tracing::instrument(level = "debug", skip(tcx, param_env))]
2757     fn fn_sig_for_fn_abi(
2758         &self,
2759         tcx: TyCtxt<'tcx>,
2760         param_env: ty::ParamEnv<'tcx>,
2761     ) -> ty::PolyFnSig<'tcx> {
2762         let ty = self.ty(tcx, param_env);
2763         match *ty.kind() {
2764             ty::FnDef(..) => {
2765                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2766                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2767                 // (i.e. due to being inside a projection that got normalized, see
2768                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2769                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2770                 //
2771                 // We normalize the `fn_sig` again after substituting at a later point.
2772                 let mut sig = match *ty.kind() {
2773                     ty::FnDef(def_id, substs) => tcx
2774                         .bound_fn_sig(def_id)
2775                         .map_bound(|fn_sig| {
2776                             tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
2777                         })
2778                         .subst(tcx, substs),
2779                     _ => unreachable!(),
2780                 };
2781
2782                 if let ty::InstanceDef::VTableShim(..) = self.def {
2783                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2784                     sig = sig.map_bound(|mut sig| {
2785                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2786                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2787                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2788                         sig
2789                     });
2790                 }
2791                 sig
2792             }
2793             ty::Closure(def_id, substs) => {
2794                 let sig = substs.as_closure().sig();
2795
2796                 let bound_vars = tcx.mk_bound_variable_kinds(
2797                     sig.bound_vars()
2798                         .iter()
2799                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2800                 );
2801                 let br = ty::BoundRegion {
2802                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2803                     kind: ty::BoundRegionKind::BrEnv,
2804                 };
2805                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2806                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2807
2808                 let sig = sig.skip_binder();
2809                 ty::Binder::bind_with_vars(
2810                     tcx.mk_fn_sig(
2811                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2812                         sig.output(),
2813                         sig.c_variadic,
2814                         sig.unsafety,
2815                         sig.abi,
2816                     ),
2817                     bound_vars,
2818                 )
2819             }
2820             ty::Generator(_, substs, _) => {
2821                 let sig = substs.as_generator().poly_sig();
2822
2823                 let bound_vars = tcx.mk_bound_variable_kinds(
2824                     sig.bound_vars()
2825                         .iter()
2826                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2827                 );
2828                 let br = ty::BoundRegion {
2829                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2830                     kind: ty::BoundRegionKind::BrEnv,
2831                 };
2832                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2833                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2834
2835                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2836                 let pin_adt_ref = tcx.adt_def(pin_did);
2837                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2838                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2839
2840                 let sig = sig.skip_binder();
2841                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2842                 let state_adt_ref = tcx.adt_def(state_did);
2843                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2844                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2845                 ty::Binder::bind_with_vars(
2846                     tcx.mk_fn_sig(
2847                         [env_ty, sig.resume_ty].iter(),
2848                         &ret_ty,
2849                         false,
2850                         hir::Unsafety::Normal,
2851                         rustc_target::spec::abi::Abi::Rust,
2852                     ),
2853                     bound_vars,
2854                 )
2855             }
2856             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2857         }
2858     }
2859 }
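// Illustrative sketch of what `fn_sig_for_fn_abi` yields (per the arms above):
// a closure `|x: u32| -> u64` becomes roughly `fn(<env>, u32) -> u64`, with the
// environment passed by value, `&`, or `&mut` depending on the closure kind,
// while a generator becomes
// `fn(Pin<&mut G>, Resume) -> GeneratorState<Yield, Return>`.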
2860
2861 /// Calculates whether a function's ABI can unwind or not.
2862 ///
2863 /// This takes two primary parameters:
2864 ///
2865 /// * `fn_def_id` - the `DefId` of the function, if known; the codegen attr
2866 ///   flags are calculated from it for defined functions. For function pointers
2867 ///   this set of flags is the empty set. This is only applicable for Rust-defined
2868 ///   functions, and generally isn't needed except for small optimizations where
2869 ///   we try to say a function which otherwise might look like it could unwind
2870 ///   doesn't actually unwind (such as for intrinsics and such).
2871 ///
2872 /// * `abi` - this is the ABI that the function is defined with. This is the
2873 ///   primary factor for determining whether a function can unwind or not.
2874 ///
2875 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2876 /// panics are implemented with unwinds on most platforms (when
2877 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2878 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2879 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2880 /// defined for each ABI individually, but it always corresponds to some form of
2881 /// stack-based unwinding (the exact mechanism of which varies
2882 /// platform-by-platform).
2883 ///
2884 /// Rust functions are classified by whether or not they can unwind, based on the
2885 /// active "panic strategy". In other words, Rust functions are considered to
2886 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2887 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2888 /// only if the final panic mode is panic=abort. In this scenario any code
2889 /// previously compiled assuming that a function can unwind is still correct; it
2890 /// just never happens to actually unwind at runtime.
2891 ///
2892 /// This function's answer to whether or not a function can unwind is quite
2893 /// impactful throughout the compiler. This affects things like:
2894 ///
2895 /// * Calling a function which can't unwind means codegen simply ignores any
2896 ///   associated unwinding cleanup.
2897 /// * Calling a function which can unwind from a function which can't unwind
2898 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2899 ///   aborts the process.
2900 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2901 ///   affects various optimizations and codegen.
2902 ///
2903 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2904 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2905 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2906 /// might (from a foreign exception or similar).
2907 #[inline]
2908 #[tracing::instrument(level = "debug", skip(tcx))]
2909 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2910     if let Some(did) = fn_def_id {
2911         // Special attribute for functions which can't unwind.
2912         if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2913             return false;
2914         }
2915
2916         // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2917         //
2918         // Note that this is true regardless of the ABI specified on the function -- an `extern "C-unwind"`
2919         // function defined in Rust is also required to abort.
2920         if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2921             return false;
2922         }
2923
2924         // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2925         //
2926         // This is not part of `codegen_fn_attrs` as it can differ between crates
2927         // and therefore cannot be computed in core.
2928         if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2929             if Some(did) == tcx.lang_items().drop_in_place_fn() {
2930                 return false;
2931             }
2932         }
2933     }
2934
2935     // Otherwise, if this isn't special, then unwinding is generally determined by
2936     // the ABI of the function itself. ABIs like `C` have variants which also
2937     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2938     // ABIs have such an option. Otherwise the only other thing here is Rust
2939     // itself, and those ABIs are determined by the panic strategy configured
2940     // for this compilation.
2941     //
2942     // Unfortunately at this time there's also another caveat. Rust [RFC
2943     // 2945][rfc] has been accepted and is in the process of being implemented
2944     // and stabilized. In this interim state we need to deal with historical
2945     // rustc behavior as well as plan for future rustc behavior.
2946     //
2947     // Historically functions declared with `extern "C"` were marked at the
2948     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2949     // or not. This is UB for functions in `panic=unwind` mode that then
2950     // actually panic and unwind. Note that this behavior is true for both
2951     // externally declared functions as well as Rust-defined functions.
2952     //
2953     // To fix this UB rustc would like to change in the future to catch unwinds
2954     // from function calls that may unwind within a Rust-defined `extern "C"`
2955     // function and forcibly abort the process, thereby respecting the
2956     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2957     // ready to roll out, so determining whether or not the `C` family of ABIs
2958     // unwinds is conditional not only on their definition but also on whether the
2959     // `#![feature(c_unwind)]` feature gate is active.
2960     //
2961     // Note that this means that, unlike historical compilers, rustc now, by
2962     // default, unconditionally thinks that the `C` ABI may unwind. This will
2963     // prevent some optimization opportunities, however, so we try to scope this
2964     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2965     // to `panic=abort`).
2966     //
2967     // Eventually the check against `c_unwind` here will ideally get removed and
2968     // this'll be a little cleaner as it'll be a straightforward check of the
2969     // ABI.
2970     //
2971     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2972     use SpecAbi::*;
2973     match abi {
2974         C { unwind }
2975         | System { unwind }
2976         | Cdecl { unwind }
2977         | Stdcall { unwind }
2978         | Fastcall { unwind }
2979         | Vectorcall { unwind }
2980         | Thiscall { unwind }
2981         | Aapcs { unwind }
2982         | Win64 { unwind }
2983         | SysV64 { unwind } => {
2984             unwind
2985                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2986         }
2987         PtxKernel
2988         | Msp430Interrupt
2989         | X86Interrupt
2990         | AmdGpuKernel
2991         | EfiApi
2992         | AvrInterrupt
2993         | AvrNonBlockingInterrupt
2994         | CCmseNonSecureCall
2995         | Wasm
2996         | RustIntrinsic
2997         | PlatformIntrinsic
2998         | Unadjusted => false,
2999         Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
3000     }
3001 }
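// Illustrative expectations (a sketch; actual results depend on session flags
// and feature gates):
//
//     // with `-Cpanic=unwind` and no `#![feature(c_unwind)]`:
//     assert!(fn_can_unwind(tcx, None, SpecAbi::Rust));
//     assert!(fn_can_unwind(tcx, None, SpecAbi::C { unwind: false }));
//     // with `-Cpanic=abort`:
//     assert!(!fn_can_unwind(tcx, None, SpecAbi::Rust));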
3002
3003 #[inline]
3004 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
3005     use rustc_target::spec::abi::Abi::*;
3006     match tcx.sess.target.adjust_abi(abi) {
3007         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
3008         RustCold => Conv::RustCold,
3009
3010         // It's the ABI's job to select this, not ours.
3011         System { .. } => bug!("system abi should be selected elsewhere"),
3012         EfiApi => bug!("eficall abi should be selected elsewhere"),
3013
3014         Stdcall { .. } => Conv::X86Stdcall,
3015         Fastcall { .. } => Conv::X86Fastcall,
3016         Vectorcall { .. } => Conv::X86VectorCall,
3017         Thiscall { .. } => Conv::X86ThisCall,
3018         C { .. } => Conv::C,
3019         Unadjusted => Conv::C,
3020         Win64 { .. } => Conv::X86_64Win64,
3021         SysV64 { .. } => Conv::X86_64SysV,
3022         Aapcs { .. } => Conv::ArmAapcs,
3023         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
3024         PtxKernel => Conv::PtxKernel,
3025         Msp430Interrupt => Conv::Msp430Intr,
3026         X86Interrupt => Conv::X86Intr,
3027         AmdGpuKernel => Conv::AmdGpuKernel,
3028         AvrInterrupt => Conv::AvrInterrupt,
3029         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3030         Wasm => Conv::C,
3031
3032         // These API constants ought to be more specific...
3033         Cdecl { .. } => Conv::C,
3034     }
3035 }
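// Illustrative sketch: on most targets `adjust_abi` is the identity, so e.g.
// `conv_from_spec_abi(tcx, SpecAbi::C { unwind: false })` is `Conv::C`, and all
// of the Rust-family ABIs map to `Conv::Rust` (or `Conv::RustCold`).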
3036
3037 /// Error produced by attempting to compute or adjust a `FnAbi`.
3038 #[derive(Copy, Clone, Debug, HashStable)]
3039 pub enum FnAbiError<'tcx> {
3040     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3041     Layout(LayoutError<'tcx>),
3042
3043     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3044     AdjustForForeignAbi(call::AdjustForForeignAbiError),
3045 }
3046
3047 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3048     fn from(err: LayoutError<'tcx>) -> Self {
3049         Self::Layout(err)
3050     }
3051 }
3052
3053 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3054     fn from(err: call::AdjustForForeignAbiError) -> Self {
3055         Self::AdjustForForeignAbi(err)
3056     }
3057 }
3058
3059 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3060     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3061         match self {
3062             Self::Layout(err) => err.fmt(f),
3063             Self::AdjustForForeignAbi(err) => err.fmt(f),
3064         }
3065     }
3066 }
3067
3068 // FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
3069 // just for error handling.
3070 #[derive(Debug)]
3071 pub enum FnAbiRequest<'tcx> {
3072     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3073     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3074 }
3075
3076 /// Trait for contexts that want to be able to compute `FnAbi`s.
3077 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3078 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3079     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3080     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3081     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3082
3083     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3084     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3085     ///
3086     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3087     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3088     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3089     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3090     fn handle_fn_abi_err(
3091         &self,
3092         err: FnAbiError<'tcx>,
3093         span: Span,
3094         fn_abi_request: FnAbiRequest<'tcx>,
3095     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3096 }
3097
3098 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3099 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3100     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3101     ///
3102     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3103     /// instead, where the instance is an `InstanceDef::Virtual`.
3104     #[inline]
3105     fn fn_abi_of_fn_ptr(
3106         &self,
3107         sig: ty::PolyFnSig<'tcx>,
3108         extra_args: &'tcx ty::List<Ty<'tcx>>,
3109     ) -> Self::FnAbiOfResult {
3110         // FIXME(eddyb) get a better `span` here.
3111         let span = self.layout_tcx_at_span();
3112         let tcx = self.tcx().at(span);
3113
3114         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3115             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3116         ))
3117     }
3118
3119     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3120     /// direct calls to an `fn`.
3121     ///
3122     /// NB: that includes virtual calls, which are represented by "direct calls"
3123     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3124     #[inline]
3125     #[tracing::instrument(level = "debug", skip(self))]
3126     fn fn_abi_of_instance(
3127         &self,
3128         instance: ty::Instance<'tcx>,
3129         extra_args: &'tcx ty::List<Ty<'tcx>>,
3130     ) -> Self::FnAbiOfResult {
3131         // FIXME(eddyb) get a better `span` here.
3132         let span = self.layout_tcx_at_span();
3133         let tcx = self.tcx().at(span);
3134
3135         MaybeResult::from(
3136             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3137                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3138                 // we can get some kind of span even if one wasn't provided.
3139                 // However, we don't do this early in order to avoid calling
3140                 // `def_span` unconditionally (which may have a perf penalty).
3141                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3142                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3143             }),
3144         )
3145     }
3146 }
3147
3148 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
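// Illustrative sketch (hypothetical codegen caller): codegen contexts pick
// `FnAbiOfResult = &FnAbi` (turning errors fatal in `handle_fn_abi_err`), so
// call sites there need no `Result` handling:
//
//     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
//     for arg in fn_abi.args.iter() {
//         debug!("arg passed as {:?}", arg.mode);
//     }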
3149
3150 fn fn_abi_of_fn_ptr<'tcx>(
3151     tcx: TyCtxt<'tcx>,
3152     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3153 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3154     let (param_env, (sig, extra_args)) = query.into_parts();
3155
3156     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3157 }
3158
3159 fn fn_abi_of_instance<'tcx>(
3160     tcx: TyCtxt<'tcx>,
3161     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3162 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3163     let (param_env, (instance, extra_args)) = query.into_parts();
3164
3165     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3166
3167     let caller_location = if instance.def.requires_caller_location(tcx) {
3168         Some(tcx.caller_location_ty())
3169     } else {
3170         None
3171     };
3172
3173     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3174         sig,
3175         extra_args,
3176         caller_location,
3177         Some(instance.def_id()),
3178         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3179     )
3180 }
3181
3182 // Handle safe Rust thin and fat pointers.
3183 pub fn adjust_for_rust_scalar<'tcx>(
3184     cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3185     attrs: &mut ArgAttributes,
3186     scalar: Scalar,
3187     layout: TyAndLayout<'tcx>,
3188     offset: Size,
3189     is_return: bool,
3190 ) {
3191     // Booleans are always a noundef i1 that needs to be zero-extended.
3192     if scalar.is_bool() {
3193         attrs.ext(ArgExtension::Zext);
3194         attrs.set(ArgAttribute::NoUndef);
3195         return;
3196     }
3197
3198     // Scalars which have invalid values cannot be undef.
3199     if !scalar.is_always_valid(&cx) {
3200         attrs.set(ArgAttribute::NoUndef);
3201     }
3202
3203     // Only pointer types are handled below.
3204     let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3205
3206     if !valid_range.contains(0) {
3207         attrs.set(ArgAttribute::NonNull);
3208     }
3209
3210     if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3211         if let Some(kind) = pointee.safe {
3212             attrs.pointee_align = Some(pointee.align);
3213
3214             // `Box` (`UniqueOwned`) is not necessarily dereferenceable
3215             // for the entire duration of the function, as it can be deallocated
3216             // at any time. Same for shared mutable references. If LLVM had a
3217             // way to say "dereferenceable on entry" we could use it here.
3218             attrs.pointee_size = match kind {
3219                 PointerKind::UniqueBorrowed
3220                 | PointerKind::UniqueBorrowedPinned
3221                 | PointerKind::Frozen => pointee.size,
3222                 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3223             };
3224
3225             // `Box`, `&T`, and `&mut T` cannot be undef.
3226             // Note that this only applies to the value of the pointer itself;
3227             // this attribute doesn't make it UB for the pointed-to data to be undef.
3228             attrs.set(ArgAttribute::NoUndef);
3229
3230             // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3231             // `noalias` for it. This can be turned off using an unstable flag.
3232             // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3233             let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3234
3235             // `&mut` pointer parameters never alias other parameters,
3236             // or mutable global data
3237             //
3238             // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3239             // and can be marked as both `readonly` and `noalias`, as
3240             // LLVM's definition of `noalias` is based solely on memory
3241             // dependencies rather than pointer equality
3242             //
3243             // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3244             // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3245             // or not to actually emit the attribute. It can also be controlled with the
3246             // `-Zmutable-noalias` debugging option.
3247             let no_alias = match kind {
3248                 PointerKind::SharedMutable
3249                 | PointerKind::UniqueBorrowed
3250                 | PointerKind::UniqueBorrowedPinned => false,
3251                 PointerKind::UniqueOwned => noalias_for_box,
3252                 PointerKind::Frozen => !is_return,
3253             };
3254             if no_alias {
3255                 attrs.set(ArgAttribute::NoAlias);
3256             }
3257
3258             if kind == PointerKind::Frozen && !is_return {
3259                 attrs.set(ArgAttribute::ReadOnly);
3260             }
3261
3262             if kind == PointerKind::UniqueBorrowed && !is_return {
3263                 attrs.set(ArgAttribute::NoAliasMutRef);
3264             }
3265         }
3266     }
3267 }
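// Illustrative outcome (sketch): for a `&u32` argument in an optimized build,
// the pointee is `Frozen`, so the logic above combines into
// `NonNull + NoUndef + NoAlias + ReadOnly`, with pointee size and align of 4.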
3268
3269 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3270     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3271     // arguments of this method, into a separate `struct`.
3272     #[tracing::instrument(
3273         level = "debug",
3274         skip(self, caller_location, fn_def_id, force_thin_self_ptr)
3275     )]
3276     fn fn_abi_new_uncached(
3277         &self,
3278         sig: ty::PolyFnSig<'tcx>,
3279         extra_args: &[Ty<'tcx>],
3280         caller_location: Option<Ty<'tcx>>,
3281         fn_def_id: Option<DefId>,
3282         // FIXME(eddyb) replace this with something typed, like an `enum`.
3283         force_thin_self_ptr: bool,
3284     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3285         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3286
3287         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3288
3289         let mut inputs = sig.inputs();
3290         let extra_args = if sig.abi == RustCall {
3291             assert!(!sig.c_variadic && extra_args.is_empty());
3292
3293             if let Some(input) = sig.inputs().last() {
3294                 if let ty::Tuple(tupled_arguments) = input.kind() {
3295                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3296                     tupled_arguments
3297                 } else {
3298                     bug!(
3299                         "argument to function with \"rust-call\" ABI \
3300                             is not a tuple"
3301                     );
3302                 }
3303             } else {
3304                 bug!(
3305                     "argument to function with \"rust-call\" ABI \
3306                         is not a tuple"
3307                 );
3308             }
3309         } else {
3310             assert!(sig.c_variadic || extra_args.is_empty());
3311             extra_args
3312         };
3313
3314         let target = &self.tcx.sess.target;
3315         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3316         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3317         let linux_s390x_gnu_like =
3318             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3319         let linux_sparc64_gnu_like =
3320             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3321         let linux_powerpc_gnu_like =
3322             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3323         use SpecAbi::*;
3324         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3325
3326         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3327             let span = tracing::debug_span!("arg_of");
3328             let _entered = span.enter();
3329             let is_return = arg_idx.is_none();
3330
3331             let layout = self.layout_of(ty)?;
3332             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3333                 // Don't pass the vtable, it's not an argument of the virtual fn.
3334                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3335                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3336                 make_thin_self_ptr(self, layout)
3337             } else {
3338                 layout
3339             };
3340
3341             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3342                 let mut attrs = ArgAttributes::new();
3343                 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
3344                 attrs
3345             });
3346
3347             if arg.layout.is_zst() {
3348                 // For some forsaken reason, x86_64-pc-windows-gnu
3349                 // doesn't ignore zero-sized struct arguments.
3350                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3351                 if is_return
3352                     || rust_abi
3353                     || (!win_x64_gnu
3354                         && !linux_s390x_gnu_like
3355                         && !linux_sparc64_gnu_like
3356                         && !linux_powerpc_gnu_like)
3357                 {
3358                     arg.mode = PassMode::Ignore;
3359                 }
3360             }
3361
3362             Ok(arg)
3363         };
3364
3365         let mut fn_abi = FnAbi {
3366             ret: arg_of(sig.output(), None)?,
3367             args: inputs
3368                 .iter()
3369                 .copied()
3370                 .chain(extra_args.iter().copied())
3371                 .chain(caller_location)
3372                 .enumerate()
3373                 .map(|(i, ty)| arg_of(ty, Some(i)))
3374                 .collect::<Result<_, _>>()?,
3375             c_variadic: sig.c_variadic,
3376             fixed_count: inputs.len() as u32,
3377             conv,
3378             can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3379         };
3380         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3381         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3382         Ok(self.tcx.arena.alloc(fn_abi))
3383     }
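    // Illustrative sketch: for an `extern "rust-call"` signature
    // `fn(&self, (u32, u64))`, the trailing tuple is flattened above, so the
    // resulting `FnAbi` has three arguments (`&self`, `u32`, `u64`) and
    // `fixed_count == 1` (only the untupled inputs count as fixed).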
3384
3385     #[tracing::instrument(level = "trace", skip(self))]
3386     fn fn_abi_adjust_for_abi(
3387         &self,
3388         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3389         abi: SpecAbi,
3390     ) -> Result<(), FnAbiError<'tcx>> {
3391         if abi == SpecAbi::Unadjusted {
3392             return Ok(());
3393         }
3394
3395         if abi == SpecAbi::Rust
3396             || abi == SpecAbi::RustCall
3397             || abi == SpecAbi::RustIntrinsic
3398             || abi == SpecAbi::PlatformIntrinsic
3399         {
3400             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3401                 if arg.is_ignore() {
3402                     return;
3403                 }
3404
3405                 match arg.layout.abi {
3406                     Abi::Aggregate { .. } => {}
3407
3408                     // This is a fun case! The gist of what this is doing is
3409                     // that we want callers and callees to always agree on the
3410                     // ABI of how they pass SIMD arguments. If we were to *not*
3411                     // make these arguments indirect then they'd be immediates
3412                     // in LLVM, which means that they'd use whatever the
3413                     // appropriate ABI is for the callee and the caller. That
3414                     // means, for example, if the caller doesn't have AVX
3415                     // enabled but the callee does, then passing an AVX argument
3416                     // across this boundary would cause corrupt data to show up.
3417                     //
3418                     // This problem is fixed by unconditionally passing SIMD
3419                     // arguments through memory between callers and callees
3420                     // which should get them all to agree on ABI regardless of
3421                     // target feature sets. Some more information about this
3422                     // issue can be found in #44367.
3423                     //
3424                     // Note that the platform intrinsic ABI is exempt here as
3425                     // that's how we connect up to LLVM and it's unstable
3426                     // anyway, we control all calls to it in libstd.
3427                     Abi::Vector { .. }
3428                         if abi != SpecAbi::PlatformIntrinsic
3429                             && self.tcx.sess.target.simd_types_indirect =>
3430                     {
3431                         arg.make_indirect();
3432                         return;
3433                     }
3434
3435                     _ => return,
3436                 }
3437
3438                 let size = arg.layout.size;
3439                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3440                     arg.make_indirect();
3441                 } else {
3442                     // We want to pass small aggregates as immediates, but using
3443                     // a LLVM aggregate type for this leads to bad optimizations,
3444                     // so we pick an appropriately sized integer type instead.
3445                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3446                 }
3447             };
3448             fixup(&mut fn_abi.ret);
3449             for arg in fn_abi.args.iter_mut() {
3450                 fixup(arg);
3451             }
3452         } else {
3453             fn_abi.adjust_for_foreign_abi(self, abi)?;
3454         }
3455
3456         Ok(())
3457     }
3458 }
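// Illustrative sketch of the Rust-ABI fixup above: on a 64-bit target, an
// argument of type `[u8; 8]` has `Abi::Aggregate` and exactly pointer size, so
// it is cast to `Reg { kind: RegKind::Integer, size: Size::from_bytes(8) }`;
// `[u8; 24]` exceeds pointer size and is made indirect instead.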
3459
3460 #[tracing::instrument(level = "debug", skip(cx))]
3461 fn make_thin_self_ptr<'tcx>(
3462     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3463     layout: TyAndLayout<'tcx>,
3464 ) -> TyAndLayout<'tcx> {
3465     let tcx = cx.tcx();
3466     let fat_pointer_ty = if layout.is_unsized() {
3467         // unsized `self` is passed as a pointer to `self`
3468         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3469         tcx.mk_mut_ptr(layout.ty)
3470     } else {
3471         match layout.abi {
3472             Abi::ScalarPair(..) | Abi::Scalar(..) => (),
3473             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3474         }
3475
3476         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3477         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3478         // elsewhere in the compiler as a method on a `dyn Trait`.
3479         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3480         // get a built-in pointer type
3481         let mut fat_pointer_layout = layout;
3482         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3483             && !fat_pointer_layout.ty.is_region_ptr()
3484         {
3485             for i in 0..fat_pointer_layout.fields.count() {
3486                 let field_layout = fat_pointer_layout.field(cx, i);
3487
3488                 if !field_layout.is_zst() {
3489                     fat_pointer_layout = field_layout;
3490                     continue 'descend_newtypes;
3491                 }
3492             }
3493
3494             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3495         }
3496
3497         fat_pointer_layout.ty
3498     };
3499
3500     // we now have a type like `*mut RcBox<dyn Trait>`
3501     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3502     // this is understood as a special case elsewhere in the compiler
3503     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3504
3505     TyAndLayout {
3506         ty: fat_pointer_ty,
3507
3508         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3509         // should always work because the type is always `*mut ()`.
3510         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3511     }
3512 }
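// Illustrative sketch: for a `self: Rc<Self>` receiver on a `dyn Trait` method,
// the `'descend_newtypes` loop above peels `Rc`'s wrappers down to its raw
// pointer field, so the returned `TyAndLayout` keeps that fat-pointer type
// while carrying the thin layout of `*mut ()`.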