use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
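
// Editor's note, an illustrative sketch of how `repr_discr` behaves (not part
// of the original source). For an enum whose discriminants span
// `min = -1, max = 100` with no `#[repr]` attribute, both bounds fit in a
// signed byte, so the result is `(I8, true)`; with `#[repr(u16)]` and
// `min = 0, max = 100`, the hint wins and the result is `(I16, false)`.
//
//     // Hypothetical call from layout code:
//     let (ity, signed) = Integer::repr_discr(tcx, ty, &repr, -1, 100);
//     assert_eq!((ity, signed), (I8, true));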

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
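
// Editor's note: for example, `&[u8]` is such a fat pointer; field
// `FAT_PTR_ADDR` holds the data pointer and field `FAT_PTR_EXTRA` holds the
// length, so on a 64-bit target the whole reference occupies 16 bytes.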

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
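
// Editor's note: a 4-bit integer can hold log2 values up to 15, so the cap
// above is `1 << 0xF`, i.e. 2^15 = 32768 lanes.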

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
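// Editor's note, illustrative example: if `map = [2, 0, 1]` (source field 0
// lives third in memory, field 1 first, field 2 second), then
// `invert_mapping(&map)` returns `[1, 2, 0]`.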
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
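
    // Editor's note, illustrative sketch: on a 64-bit target, a pair of a
    // pointer scalar and a `usize` scalar (as built for `&[T]` further down)
    // gets offsets 0 and 8, size 16, and align 8; the recorded niche comes
    // from whichever of the two scalars has more spare values.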

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
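
        // Editor's note, illustrative example: for a struct with fields
        // `(u8, u32, u16)` under the default `repr(Rust)`, the sort above
        // orders memory as `u32, u16, u8` (ZSTs first, then decreasing
        // alignment), so `inverse_memory_index` becomes `[1, 2, 0]`.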

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
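
        // Editor's note, illustrative example: a newtype such as
        // `struct Wrapper(u64)` hits the single-non-ZST-field case above and
        // inherits `Abi::Scalar`, so codegen can treat it like a bare `u64`;
        // under `#[repr(C)]` (`optimize == false`) the scalar/vector case is
        // skipped and the struct keeps its aggregate ABI.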

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
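            // Editor's note: these restricted valid ranges are what enable
            // niche optimizations, e.g. `Option<bool>` still fits in one byte
            // because the values 2..=255 are free to encode `None`.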
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a fixed
                // maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }
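
            // Editor's note, illustrative example: a hypothetical
            // `#[repr(simd)] struct F32x4([f32; 4]);` takes this arm and gets
            // `Abi::Vector { element: f32, count: 4 }`, a 16-byte size, and
            // the target's preferred vector alignment for that size.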

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
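
                // Editor's note, illustrative example: for
                // `union U { a: u8, b: u32 }`, every field starts at offset
                // 0, the size is the largest field size (4) rounded up to the
                // largest field alignment (4), and no niche is recorded since
                // any bit pattern may be stored through either field.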

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
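
                // Editor's note, illustrative example: this is the path by
                // which `core::num::NonZeroU32` (through its
                // `#[rustc_layout_scalar_valid_range_start(1)]` attribute)
                // gets a scalar layout whose valid range starts at 1, which
                // in turn lets `Option<NonZeroU32>` use 0 to encode `None`.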

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // Only the niche is guaranteed to be initialised,
                                        // so use union layout for the other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, second.to_union())
                                        } else {
                                            Abi::ScalarPair(first.to_union(), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
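
                // Editor's note, illustrative example: for `Option<&u8>` the
                // dataful variant is `Some`, the reference's non-null niche
                // absorbs the lone `None` variant, and the enum stays
                // pointer-sized with no separate tag field.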

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
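
                // Editor's note, illustrative example: a C-like
                // `enum E { A, B, C }` has `min = 0, max = 2`, so the call
                // above returns `(I8, false)` and the tag fits in a single
                // byte unless a `#[repr]` hint or `c_enum_min_size` asks for
                // more.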

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
1199                 // determining the alignment of the overall enum, and for
1200                 // determining the alignment of the payload after the tag.)
1201                 let mut prefix_align = min_ity.align(dl).abi;
1202                 if def.repr().c() {
1203                     for fields in &variants {
1204                         for field in fields {
1205                             prefix_align = prefix_align.max(field.align.abi);
1206                         }
1207                     }
1208                 }
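                     // E.g. for a hypothetical `#[repr(C)] enum E { A(u8), B(u64) }`,
                     // the prefix alignment grows from the tag's own alignment to 8
                     // (the alignment of `u64`), so every variant's payload starts at
                     // the same maximally-aligned offset after the tag.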
1209
1210                 // Create the set of structs that represent each variant.
1211                 let mut layout_variants = variants
1212                     .iter_enumerated()
1213                     .map(|(i, field_layouts)| {
1214                         let mut st = self.univariant_uninterned(
1215                             ty,
1216                             &field_layouts,
1217                             &def.repr(),
1218                             StructKind::Prefixed(min_ity.size(), prefix_align),
1219                         )?;
1220                         st.variants = Variants::Single { index: i };
1221                         // Find the first field we can't move later
1222                         // to make room for a larger discriminant.
1223                         for field in
1224                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1225                         {
1226                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1227                                 start_align = start_align.min(field.align.abi);
1228                                 break;
1229                             }
1230                         }
1231                         size = cmp::max(size, st.size);
1232                         align = align.max(st.align);
1233                         Ok(st)
1234                     })
1235                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1236
1237                 // Align the maximum variant size to the largest alignment.
1238                 size = size.align_to(align.abi);
1239
1240                 if size.bytes() >= dl.obj_size_bound() {
1241                     return Err(LayoutError::SizeOverflow(ty));
1242                 }
1243
1244                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1245                 if typeck_ity < min_ity {
1246                     // It is a bug if layout decided on a larger discriminant size than
1247                     // typeck did at this point (based on the values the discriminant can
1248                     // take on), mostly because this discriminant will be loaded and then
1249                     // stored into a variable of the type computed by typeck. Consider the
1250                     // following case (a bug): typeck decided on a byte-sized discriminant,
1251                     // but layout thinks we need 16 bits to store all the discriminant
1252                     // values. Then, in codegen, storing this 16-bit discriminant into an
1253                     // 8-bit temporary would have to discard some of the space necessary
1254                     // to represent it (or layout is wrong in thinking it needs 16 bits).
1255                     bug!(
1256                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1257                         min_ity,
1258                         typeck_ity
1259                     );
1260                     // However, it is fine to make the discriminant type arbitrarily large (as an
1261                     // optimisation) after this point; we'll just truncate the value we load in codegen.
1262                 }
1263
1264                 // Check to see if we should use a different type for the
1265                 // discriminant. We can safely use a type with the same size
1266                 // as the alignment of the first field of each variant.
1267                 // We increase the size of the discriminant to avoid LLVM copying
1268                 // padding when it doesn't need to; such copying normally causes unaligned
1269                 // load/stores and excessive memcpy/memset operations. By using a
1270                 // bigger integer size, LLVM can be sure about its contents and
1271                 // won't be so conservative.
1272
1273                 // Use the initial field alignment, unless `repr(C)` or an explicit repr pins the discriminant type.
1274                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1275                     min_ity
1276                 } else {
1277                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1278                 };
1279
1280                 // If the alignment-derived integer is not larger than the minimum
1281                 // discriminant size, don't use it as the final tag size.
1282                 if ity <= min_ity {
1283                     ity = min_ity;
1284                 } else {
1285                     // Patch up the variants' first few fields.
1286                     let old_ity_size = min_ity.size();
1287                     let new_ity_size = ity.size();
1288                     for variant in &mut layout_variants {
1289                         match variant.fields {
1290                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1291                                 for i in offsets {
1292                                     if *i <= old_ity_size {
1293                                         assert_eq!(*i, old_ity_size);
1294                                         *i = new_ity_size;
1295                                     }
1296                                 }
1297                                 // We might be making the struct larger.
1298                                 if variant.size <= old_ity_size {
1299                                     variant.size = new_ity_size;
1300                                 }
1301                             }
1302                             _ => bug!(),
1303                         }
1304                     }
1305                 }
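                     // E.g. for a hypothetical `enum E { A(u32), B }`: `min_ity` is `I8`
                     // (two variants), but `start_align` is 4 because of the `u32` field,
                     // so the tag is widened to `I32`. Bytes 0..4 then hold a fully
                     // defined scalar instead of one tag byte plus three padding bytes,
                     // without changing the overall size of 8 bytes.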
1306
1307                 let tag_mask = ity.size().unsigned_int_max();
1308                 let tag = Scalar::Initialized {
1309                     value: Int(ity, signed),
1310                     valid_range: WrappingRange {
1311                         start: (min as u128 & tag_mask),
1312                         end: (max as u128 & tag_mask),
1313                     },
1314                 };
1315                 let mut abi = Abi::Aggregate { sized: true };
1316
1317                 // Without the latter check, aligned enums with custom discriminant values
1318                 // would result in an ICE; see issue #92464 for more info.
1319                 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1320                     abi = Abi::Scalar(tag);
1321                 } else {
1322                     // Try to use a ScalarPair for all tagged enums.
1323                     let mut common_prim = None;
1324                     let mut common_prim_initialized_in_all_variants = true;
1325                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1326                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1327                             bug!();
1328                         };
1329                         let mut fields =
1330                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1331                         let (field, offset) = match (fields.next(), fields.next()) {
1332                             (None, None) => {
1333                                 common_prim_initialized_in_all_variants = false;
1334                                 continue;
1335                             }
1336                             (Some(pair), None) => pair,
1337                             _ => {
1338                                 common_prim = None;
1339                                 break;
1340                             }
1341                         };
1342                         let prim = match field.abi {
1343                             Abi::Scalar(scalar) => {
1344                                 common_prim_initialized_in_all_variants &=
1345                                     matches!(scalar, Scalar::Initialized { .. });
1346                                 scalar.primitive()
1347                             }
1348                             _ => {
1349                                 common_prim = None;
1350                                 break;
1351                             }
1352                         };
1353                         if let Some(pair) = common_prim {
1354                             // This is pretty conservative. We could go fancier
1355                             // by conflating things like i32 and u32, or even
1356                             // realising that (u8, u8) could just cohabit with
1357                             // u16 or even u32.
1358                             if pair != (prim, offset) {
1359                                 common_prim = None;
1360                                 break;
1361                             }
1362                         } else {
1363                             common_prim = Some((prim, offset));
1364                         }
1365                     }
1366                     if let Some((prim, offset)) = common_prim {
1367                         let prim_scalar = if common_prim_initialized_in_all_variants {
1368                             scalar_unit(prim)
1369                         } else {
1370                             // Common prim might be uninit.
1371                             Scalar::Union { value: prim }
1372                         };
1373                         let pair = self.scalar_pair(tag, prim_scalar);
1374                         let pair_offsets = match pair.fields {
1375                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1376                                 assert_eq!(memory_index, &[0, 1]);
1377                                 offsets
1378                             }
1379                             _ => bug!(),
1380                         };
1381                         if pair_offsets[0] == Size::ZERO
1382                             && pair_offsets[1] == *offset
1383                             && align == pair.align
1384                             && size == pair.size
1385                         {
1386                             // We can use `ScalarPair` only when it matches our
1387                             // already computed layout (including `#[repr(C)]`).
1388                             abi = pair.abi;
1389                         }
1390                     }
1391                 }
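                     // E.g. a hypothetical `enum E { A(u32), B(u32) }` reaches this point
                     // with `common_prim` describing a `u32` at the same offset in both
                     // variants, so the enum as a whole can get `Abi::ScalarPair(tag, u32)`
                     // and be handled as two immediates rather than an in-memory aggregate.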
1392
1393                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1394                     abi = Abi::Uninhabited;
1395                 }
1396
1397                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1398
1399                 let layout_variants =
1400                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1401
1402                 let tagged_layout = LayoutS {
1403                     variants: Variants::Multiple {
1404                         tag,
1405                         tag_encoding: TagEncoding::Direct,
1406                         tag_field: 0,
1407                         variants: layout_variants,
1408                     },
1409                     fields: FieldsShape::Arbitrary {
1410                         offsets: vec![Size::ZERO],
1411                         memory_index: vec![0],
1412                     },
1413                     largest_niche,
1414                     abi,
1415                     align,
1416                     size,
1417                 };
1418
1419                 let best_layout = match (tagged_layout, niche_filling_layout) {
1420                     (tagged_layout, Some(niche_filling_layout)) => {
1421                         // Pick the smaller layout; otherwise,
1422                         // pick the layout with the larger niche; otherwise,
1423                         // pick tagged as it has simpler codegen.
1424                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1425                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1426                             (layout.size, cmp::Reverse(niche_size))
1427                         })
1428                     }
1429                     (tagged_layout, None) => tagged_layout,
1430                 };
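                     // E.g. for `Option<&T>` the niche-filling candidate is a single
                     // pointer (the null value encodes `None`), while the tagged candidate
                     // would need an extra tag word, so the niche-filling layout wins on
                     // size here.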
1431
1432                 tcx.intern_layout(best_layout)
1433             }
1434
1435             // Types with no meaningful known layout.
1436             ty::Projection(_) | ty::Opaque(..) => {
1437                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1438                 // if that was possible, so there's no reason to try again here.
1439                 return Err(LayoutError::Unknown(ty));
1440             }
1441
1442             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1443                 bug!("Layout::compute: unexpected type `{}`", ty)
1444             }
1445
1446             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1447                 return Err(LayoutError::Unknown(ty));
1448             }
1449         })
1450     }
1451 }
1452
1453 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1454 #[derive(Clone, Debug, PartialEq)]
1455 enum SavedLocalEligibility {
1456     Unassigned,
1457     Assigned(VariantIdx),
1458     // FIXME: Use newtype_index so we aren't wasting bytes
1459     Ineligible(Option<u32>),
1460 }
1461
1462 // When laying out generators, we divide our saved local fields into two
1463 // categories: overlap-eligible and overlap-ineligible.
1464 //
1465 // Those fields which are ineligible for overlap go in a "prefix" at the
1466 // beginning of the layout, and always have space reserved for them.
1467 //
1468 // Overlap-eligible fields are only assigned to one variant, so we lay
1469 // those fields out for each variant and put them right after the
1470 // prefix.
1471 //
1472 // Finally, in the layout details, we point to the fields from the
1473 // variants they are assigned to. It is possible for some fields to be
1474 // included in multiple variants. No field ever "moves around" in the
1475 // layout; its offset is always the same.
1476 //
1477 // Also included in the layout are the upvars and the discriminant.
1478 // These are included as fields on the "outer" layout; they are not part
1479 // of any variant.
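//
// As an illustrative sketch: for a generator with an upvar `u`, a local `a`
// live across two suspension points (hence ineligible) and locals `b` and `c`
// each live in only one variant (hence eligible), the layout is roughly
//
//     [ u | discriminant | a | b or c (per variant, may overlap) ]
//
// where `u`, the discriminant, and `a` form the shared prefix.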
1480 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1481     /// Compute the eligibility and assignment of each local.
1482     fn generator_saved_local_eligibility(
1483         &self,
1484         info: &GeneratorLayout<'tcx>,
1485     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1486         use SavedLocalEligibility::*;
1487
1488         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1489             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1490
1491         // The saved locals not eligible for overlap. These will get
1492         // "promoted" to the prefix of our generator.
1493         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1494
1495         // Figure out which of our saved locals are fields in only
1496         // one variant. The rest are deemed ineligible for overlap.
1497         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1498             for local in fields {
1499                 match assignments[*local] {
1500                     Unassigned => {
1501                         assignments[*local] = Assigned(variant_index);
1502                     }
1503                     Assigned(idx) => {
1504                         // We've already seen this local at another suspension
1505                         // point, so it is no longer a candidate.
1506                         trace!(
1507                             "removing local {:?} in >1 variant ({:?}, {:?})",
1508                             local,
1509                             variant_index,
1510                             idx
1511                         );
1512                         ineligible_locals.insert(*local);
1513                         assignments[*local] = Ineligible(None);
1514                     }
1515                     Ineligible(_) => {}
1516                 }
1517             }
1518         }
1519
1520         // Next, check every pair of eligible locals to see if they
1521         // conflict.
1522         for local_a in info.storage_conflicts.rows() {
1523             let conflicts_a = info.storage_conflicts.count(local_a);
1524             if ineligible_locals.contains(local_a) {
1525                 continue;
1526             }
1527
1528             for local_b in info.storage_conflicts.iter(local_a) {
1529                 // local_a and local_b are storage live at the same time, therefore they
1530                 // cannot overlap in the generator layout. The only way to guarantee
1531                 // this is if they are in the same variant, or one is ineligible
1532                 // (which means it is stored in every variant).
1533                 if ineligible_locals.contains(local_b)
1534                     || assignments[local_a] == assignments[local_b]
1535                 {
1536                     continue;
1537                 }
1538
1539                 // If they conflict, we will choose one to make ineligible.
1540                 // This is not always optimal; it's just a greedy heuristic that
1541                 // seems to produce good results most of the time.
1542                 let conflicts_b = info.storage_conflicts.count(local_b);
1543                 let (remove, other) =
1544                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1545                 ineligible_locals.insert(remove);
1546                 assignments[remove] = Ineligible(None);
1547                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1548             }
1549         }
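         // E.g. (illustrative) if `a` conflicts with both `b` and `c` while `b`
         // and `c` conflict only with `a`, then `conflicts_a == 2` exceeds both
         // of their counts, so `a` is the one made ineligible and `b`/`c` remain
         // candidates for overlapping each other.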
1550
1551         // Count the number of variants in use. If only one is used, then it is
1552         // impossible to overlap any locals in our layout. In this case it's
1553         // always better to make the remaining locals ineligible, so we can
1554         // lay them out with the other locals in the prefix and eliminate
1555         // unnecessary padding bytes.
1556         {
1557             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1558             for assignment in &assignments {
1559                 if let Assigned(idx) = assignment {
1560                     used_variants.insert(*idx);
1561                 }
1562             }
1563             if used_variants.count() < 2 {
1564                 for assignment in assignments.iter_mut() {
1565                     *assignment = Ineligible(None);
1566                 }
1567                 ineligible_locals.insert_all();
1568             }
1569         }
1570
1571         // Write down the order of our locals that will be promoted to the prefix.
1572         {
1573             for (idx, local) in ineligible_locals.iter().enumerate() {
1574                 assignments[local] = Ineligible(Some(idx as u32));
1575             }
1576         }
1577         debug!("generator saved local assignments: {:?}", assignments);
1578
1579         (ineligible_locals, assignments)
1580     }
1581
1582     /// Compute the full generator layout.
1583     fn generator_layout(
1584         &self,
1585         ty: Ty<'tcx>,
1586         def_id: hir::def_id::DefId,
1587         substs: SubstsRef<'tcx>,
1588     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1589         use SavedLocalEligibility::*;
1590         let tcx = self.tcx;
1591         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1592
1593         let Some(info) = tcx.generator_layout(def_id) else {
1594             return Err(LayoutError::Unknown(ty));
1595         };
1596         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1597
1598         // Build a prefix layout, including "promoting" all ineligible
1599         // locals as part of the prefix. We compute the layout of all of
1600         // these fields at once to get optimal packing.
1601         let tag_index = substs.as_generator().prefix_tys().count();
1602
1603         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1604         let max_discr = (info.variant_fields.len() - 1) as u128;
1605         let discr_int = Integer::fit_unsigned(max_discr);
1606         let discr_int_ty = discr_int.to_ty(tcx, false);
1607         let tag = Scalar::Initialized {
1608             value: Primitive::Int(discr_int, false),
1609             valid_range: WrappingRange { start: 0, end: max_discr },
1610         };
1611         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1612         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
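         // E.g. (illustrative) a generator with a single suspension point has
         // 4 variants (unresumed, returned, panicked, plus one suspend state),
         // so `max_discr == 3`, `fit_unsigned` picks `I8`, and the tag is a
         // `u8` valid in `0..=3`.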
1613
1614         let promoted_layouts = ineligible_locals
1615             .iter()
1616             .map(|local| subst_field(info.field_tys[local]))
1617             .map(|ty| tcx.mk_maybe_uninit(ty))
1618             .map(|ty| self.layout_of(ty));
1619         let prefix_layouts = substs
1620             .as_generator()
1621             .prefix_tys()
1622             .map(|ty| self.layout_of(ty))
1623             .chain(iter::once(Ok(tag_layout)))
1624             .chain(promoted_layouts)
1625             .collect::<Result<Vec<_>, _>>()?;
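         // (The `MaybeUninit` wrapping above reflects that an ineligible local
         // only holds a live value in the states that actually use it, even
         // though space for it is reserved in every state.)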
1626         let prefix = self.univariant_uninterned(
1627             ty,
1628             &prefix_layouts,
1629             &ReprOptions::default(),
1630             StructKind::AlwaysSized,
1631         )?;
1632
1633         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1634
1635         // Split the prefix layout into the "outer" fields (upvars and
1636         // discriminant) and the "promoted" fields. Promoted fields will
1637         // get included in each variant that requested them in
1638         // GeneratorLayout.
1639         debug!("prefix = {:#?}", prefix);
1640         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1641             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1642                 let mut inverse_memory_index = invert_mapping(&memory_index);
1643
1644                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1645                 // "outer" and "promoted" fields respectively.
1646                 let b_start = (tag_index + 1) as u32;
1647                 let offsets_b = offsets.split_off(b_start as usize);
1648                 let offsets_a = offsets;
1649
1650                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1651                 // by preserving the order but keeping only one disjoint "half" each.
1652                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1653                 let inverse_memory_index_b: Vec<_> =
1654                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1655                 inverse_memory_index.retain(|&i| i < b_start);
1656                 let inverse_memory_index_a = inverse_memory_index;
1657
1658                 // Since `inverse_memory_index_{a,b}` each only refer to their
1659                 // respective fields, they can be safely inverted.
1660                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1661                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1662
1663                 let outer_fields =
1664                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1665                 (outer_fields, offsets_b, memory_index_b)
1666             }
1667             _ => bug!(),
1668         };
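         // E.g. (illustrative) with one upvar, the tag, and two promoted locals,
         // suppose `memory_index == [2, 0, 3, 1]` (source-order fields sit at
         // memory positions 2, 0, 3 and 1). Then `inverse_memory_index == [1, 3, 0, 2]`
         // and `b_start == 2`: the "b" half keeps the entries `[3, 2]`, rebased to
         // `[1, 0]`, the "a" half keeps `[1, 0]`, and inverting each half yields
         // `memory_index_a == [1, 0]` and `memory_index_b == [1, 0]`.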
1669
1670         let mut size = prefix.size;
1671         let mut align = prefix.align;
1672         let variants = info
1673             .variant_fields
1674             .iter_enumerated()
1675             .map(|(index, variant_fields)| {
1676                 // Only include overlap-eligible fields when we compute our variant layout.
1677                 let variant_only_tys = variant_fields
1678                     .iter()
1679                     .filter(|local| match assignments[**local] {
1680                         Unassigned => bug!(),
1681                         Assigned(v) if v == index => true,
1682                         Assigned(_) => bug!("assignment does not match variant"),
1683                         Ineligible(_) => false,
1684                     })
1685                     .map(|local| subst_field(info.field_tys[*local]));
1686
1687                 let mut variant = self.univariant_uninterned(
1688                     ty,
1689                     &variant_only_tys
1690                         .map(|ty| self.layout_of(ty))
1691                         .collect::<Result<Vec<_>, _>>()?,
1692                     &ReprOptions::default(),
1693                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1694                 )?;
1695                 variant.variants = Variants::Single { index };
1696
1697                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1698                     bug!();
1699                 };
1700
1701                 // Now, stitch the promoted and variant-only fields back together in
1702                 // the order they are mentioned by our GeneratorLayout.
1703                 // Because we only use some subset (that can differ between variants)
1704                 // of the promoted fields, we can't just pick those elements of the
1705                 // `promoted_memory_index` (as we'd end up with gaps).
1706                 // So instead, we build an "inverse memory_index", as if all of the
1707                 // promoted fields were being used, but leave the elements not in the
1708                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1709                 // obtain a valid (bijective) mapping.
1710                 const INVALID_FIELD_IDX: u32 = !0;
1711                 let mut combined_inverse_memory_index =
1712                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1713                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1714                 let combined_offsets = variant_fields
1715                     .iter()
1716                     .enumerate()
1717                     .map(|(i, local)| {
1718                         let (offset, memory_index) = match assignments[*local] {
1719                             Unassigned => bug!(),
1720                             Assigned(_) => {
1721                                 let (offset, memory_index) =
1722                                     offsets_and_memory_index.next().unwrap();
1723                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1724                             }
1725                             Ineligible(field_idx) => {
1726                                 let field_idx = field_idx.unwrap() as usize;
1727                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1728                             }
1729                         };
1730                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1731                         offset
1732                     })
1733                     .collect();
1734
1735                 // Remove the unused slots and invert the mapping to obtain the
1736                 // combined `memory_index` (also see previous comment).
1737                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1738                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
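                 // E.g. (illustrative, assuming `promoted_memory_index == [0, 1]`):
                 // for a variant using only promoted field 1 plus one eligible field,
                 // the combined inverse starts as `[!0, !0, !0]`, is filled by the
                 // loop above to `[!0, 1, 0]`, and `retain` + `invert_mapping`
                 // produce the dense `memory_index == [1, 0]`.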
1739
1740                 variant.fields = FieldsShape::Arbitrary {
1741                     offsets: combined_offsets,
1742                     memory_index: combined_memory_index,
1743                 };
1744
1745                 size = size.max(variant.size);
1746                 align = align.max(variant.align);
1747                 Ok(tcx.intern_layout(variant))
1748             })
1749             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1750
1751         size = size.align_to(align.abi);
1752
1753         let abi =
1754             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1755                 Abi::Uninhabited
1756             } else {
1757                 Abi::Aggregate { sized: true }
1758             };
1759
1760         let layout = tcx.intern_layout(LayoutS {
1761             variants: Variants::Multiple {
1762                 tag,
1763                 tag_encoding: TagEncoding::Direct,
1764                 tag_field: tag_index,
1765                 variants,
1766             },
1767             fields: outer_fields,
1768             abi,
1769             largest_niche: prefix.largest_niche,
1770             size,
1771             align,
1772         });
1773         debug!("generator layout ({:?}): {:#?}", ty, layout);
1774         Ok(layout)
1775     }
1776
1777     /// This is invoked by the `layout_of` query to record the final
1778     /// layout of each type.
1779     #[inline(always)]
1780     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1781         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1782         // for dumping later.
1783         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1784             self.record_layout_for_printing_outlined(layout)
1785         }
1786     }
1787
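     // With `-Zprint-type-sizes`, the records collected below are later dumped
     // as lines of the form (illustrative):
     //
     //     print-type-size type: `E`: 8 bytes, alignment: 4 bytes
     //     print-type-size     discriminant: 4 bytes
     //     print-type-size     variant `A`: 4 bytes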
1788     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1789         // Ignore layouts that are computed with non-empty environments or
1790         // that are non-monomorphic, as the user only wants to see the layouts
1791         // resulting from the final codegen session.
1792         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1793             return;
1794         }
1795
1796         // (delay format until we actually need it)
1797         let record = |kind, packed, opt_discr_size, variants| {
1798             let type_desc = format!("{:?}", layout.ty);
1799             self.tcx.sess.code_stats.record_type_size(
1800                 kind,
1801                 type_desc,
1802                 layout.align.abi,
1803                 layout.size,
1804                 packed,
1805                 opt_discr_size,
1806                 variants,
1807             );
1808         };
1809
1810         let adt_def = match *layout.ty.kind() {
1811             ty::Adt(ref adt_def, _) => {
1812                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1813                 adt_def
1814             }
1815
1816             ty::Closure(..) => {
1817                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1818                 record(DataTypeKind::Closure, false, None, vec![]);
1819                 return;
1820             }
1821
1822             _ => {
1823                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1824                 return;
1825             }
1826         };
1827
1828         let adt_kind = adt_def.adt_kind();
1829         let adt_packed = adt_def.repr().pack.is_some();
1830
1831         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1832             let mut min_size = Size::ZERO;
1833             let field_info: Vec<_> = flds
1834                 .iter()
1835                 .enumerate()
1836                 .map(|(i, &name)| {
1837                     let field_layout = layout.field(self, i);
1838                     let offset = layout.fields.offset(i);
1839                     let field_end = offset + field_layout.size;
1840                     if min_size < field_end {
1841                         min_size = field_end;
1842                     }
1843                     FieldInfo {
1844                         name: name.to_string(),
1845                         offset: offset.bytes(),
1846                         size: field_layout.size.bytes(),
1847                         align: field_layout.align.abi.bytes(),
1848                     }
1849                 })
1850                 .collect();
1851
1852             VariantInfo {
1853                 name: n.map(|n| n.to_string()),
1854                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1855                 align: layout.align.abi.bytes(),
1856                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1857                 fields: field_info,
1858             }
1859         };
1860
1861         match layout.variants {
1862             Variants::Single { index } => {
1863                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1864                     debug!(
1865                         "print-type-size `{:#?}` variant {}",
1866                         layout,
1867                         adt_def.variant(index).name
1868                     );
1869                     let variant_def = &adt_def.variant(index);
1870                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1871                     record(
1872                         adt_kind.into(),
1873                         adt_packed,
1874                         None,
1875                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1876                     );
1877                 } else {
1878                     // (This case arises for *empty* enums; so give it
1879                     // zero variants.)
1880                     record(adt_kind.into(), adt_packed, None, vec![]);
1881                 }
1882             }
1883
1884             Variants::Multiple { tag, ref tag_encoding, .. } => {
1885                 debug!(
1886                     "print-type-size `{:#?}` adt general variants def {}",
1887                     layout.ty,
1888                     adt_def.variants().len()
1889                 );
1890                 let variant_infos: Vec<_> = adt_def
1891                     .variants()
1892                     .iter_enumerated()
1893                     .map(|(i, variant_def)| {
1894                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1895                         build_variant_info(
1896                             Some(variant_def.name),
1897                             &fields,
1898                             layout.for_variant(self, i),
1899                         )
1900                     })
1901                     .collect();
1902                 record(
1903                     adt_kind.into(),
1904                     adt_packed,
1905                     match tag_encoding {
1906                         TagEncoding::Direct => Some(tag.size(self)),
1907                         _ => None,
1908                     },
1909                     variant_infos,
1910                 );
1911             }
1912         }
1913     }
1914 }
1915
1916 /// Type size "skeleton", i.e., the only information determining a type's size.
1917 /// While this is conservative (aside from constant sizes, only pointers,
1918 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1919 /// enough to statically check common use cases of transmute.
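 ///
 /// For example, both `&T` and `Option<&T>` reduce to `SizeSkeleton::Pointer`
 /// with the same `tail`, which is what lets a generic
 ///
 /// ```ignore (illustrative)
 /// unsafe fn strip<T>(x: Option<&T>) -> &T { std::mem::transmute(x) }
 /// ```
 ///
 /// pass the transmute size check even though `T`'s concrete layout is
 /// unknown at this point.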
1920 #[derive(Copy, Clone, Debug)]
1921 pub enum SizeSkeleton<'tcx> {
1922     /// Any statically computable Layout.
1923     Known(Size),
1924
1925     /// A potentially-fat pointer.
1926     Pointer {
1927         /// If true, this pointer is never null.
1928         non_zero: bool,
1929         /// The type which determines the unsized metadata, if any,
1930         /// of this pointer. Either a type parameter or a projection
1931         /// depending on one, with regions erased.
1932         tail: Ty<'tcx>,
1933     },
1934 }
1935
1936 impl<'tcx> SizeSkeleton<'tcx> {
1937     pub fn compute(
1938         ty: Ty<'tcx>,
1939         tcx: TyCtxt<'tcx>,
1940         param_env: ty::ParamEnv<'tcx>,
1941     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1942         debug_assert!(!ty.has_infer_types_or_consts());
1943
1944         // First try computing a static layout.
1945         let err = match tcx.layout_of(param_env.and(ty)) {
1946             Ok(layout) => {
1947                 return Ok(SizeSkeleton::Known(layout.size));
1948             }
1949             Err(err) => err,
1950         };
1951
1952         match *ty.kind() {
1953             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1954                 let non_zero = !ty.is_unsafe_ptr();
1955                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1956                 match tail.kind() {
1957                     ty::Param(_) | ty::Projection(_) => {
1958                         debug_assert!(tail.has_param_types_or_consts());
1959                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1960                     }
1961                     _ => bug!(
1962                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1963                               tail `{}` is not a type parameter or a projection",
1964                         ty,
1965                         err,
1966                         tail
1967                     ),
1968                 }
1969             }
1970
1971             ty::Adt(def, substs) => {
1972                 // Only newtypes and enums w/ nullable pointer optimization.
1973                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1974                     return Err(err);
1975                 }
1976
1977                 // Get a zero-sized variant or a pointer newtype.
1978                 let zero_or_ptr_variant = |i| {
1979                     let i = VariantIdx::new(i);
1980                     let fields =
1981                         def.variant(i).fields.iter().map(|field| {
1982                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1983                         });
1984                     let mut ptr = None;
1985                     for field in fields {
1986                         let field = field?;
1987                         match field {
1988                             SizeSkeleton::Known(size) => {
1989                                 if size.bytes() > 0 {
1990                                     return Err(err);
1991                                 }
1992                             }
1993                             SizeSkeleton::Pointer { .. } => {
1994                                 if ptr.is_some() {
1995                                     return Err(err);
1996                                 }
1997                                 ptr = Some(field);
1998                             }
1999                         }
2000                     }
2001                     Ok(ptr)
2002                 };
2003
2004                 let v0 = zero_or_ptr_variant(0)?;
2005                 // Newtype.
2006                 if def.variants().len() == 1 {
2007                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2008                         return Ok(SizeSkeleton::Pointer {
2009                             non_zero: non_zero
2010                                 || match tcx.layout_scalar_valid_range(def.did()) {
2011                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2012                                     (Bound::Included(start), Bound::Included(end)) => {
2013                                         0 < start && start < end
2014                                     }
2015                                     _ => false,
2016                                 },
2017                             tail,
2018                         });
2019                     } else {
2020                         return Err(err);
2021                     }
2022                 }
2023
2024                 let v1 = zero_or_ptr_variant(1)?;
2025                 // Nullable pointer enum optimization.
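                 // E.g. (illustrative) `enum E<'a, T> { P(&'a T), N }`: one variant
                 // is a non-null pointer and the other is zero-sized, so the whole
                 // enum is again a pointer-sized skeleton, now nullable
                 // (`non_zero: false`).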
2026                 match (v0, v1) {
2027                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2028                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2029                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2030                     }
2031                     _ => Err(err),
2032                 }
2033             }
2034
2035             ty::Projection(_) | ty::Opaque(..) => {
2036                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2037                 if ty == normalized {
2038                     Err(err)
2039                 } else {
2040                     SizeSkeleton::compute(normalized, tcx, param_env)
2041                 }
2042             }
2043
2044             _ => Err(err),
2045         }
2046     }
2047
2048     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2049         match (self, other) {
2050             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2051             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2052                 a == b
2053             }
2054             _ => false,
2055         }
2056     }
2057 }
2058
2059 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2060     fn tcx(&self) -> TyCtxt<'tcx>;
2061 }
2062
2063 pub trait HasParamEnv<'tcx> {
2064     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2065 }
2066
2067 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2068     #[inline]
2069     fn data_layout(&self) -> &TargetDataLayout {
2070         &self.data_layout
2071     }
2072 }
2073
2074 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2075     fn target_spec(&self) -> &Target {
2076         &self.sess.target
2077     }
2078 }
2079
2080 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2081     #[inline]
2082     fn tcx(&self) -> TyCtxt<'tcx> {
2083         *self
2084     }
2085 }
2086
2087 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2088     #[inline]
2089     fn data_layout(&self) -> &TargetDataLayout {
2090         &self.data_layout
2091     }
2092 }
2093
2094 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2095     fn target_spec(&self) -> &Target {
2096         &self.sess.target
2097     }
2098 }
2099
2100 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2101     #[inline]
2102     fn tcx(&self) -> TyCtxt<'tcx> {
2103         **self
2104     }
2105 }
2106
2107 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2108     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2109         self.param_env
2110     }
2111 }
2112
2113 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2114     fn data_layout(&self) -> &TargetDataLayout {
2115         self.tcx.data_layout()
2116     }
2117 }
2118
2119 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2120     fn target_spec(&self) -> &Target {
2121         self.tcx.target_spec()
2122     }
2123 }
2124
2125 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2126     fn tcx(&self) -> TyCtxt<'tcx> {
2127         self.tcx.tcx()
2128     }
2129 }
2130
2131 pub trait MaybeResult<T> {
2132     type Error;
2133
2134     fn from(x: Result<T, Self::Error>) -> Self;
2135     fn to_result(self) -> Result<T, Self::Error>;
2136 }
2137
2138 impl<T> MaybeResult<T> for T {
2139     type Error = !;
2140
2141     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2142         x
2143     }
2144     fn to_result(self) -> Result<T, Self::Error> {
2145         Ok(self)
2146     }
2147 }
2148
2149 impl<T, E> MaybeResult<T> for Result<T, E> {
2150     type Error = E;
2151
2152     fn from(x: Result<T, Self::Error>) -> Self {
2153         x
2154     }
2155     fn to_result(self) -> Result<T, Self::Error> {
2156         self
2157     }
2158 }
2159
2160 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2161
2162 /// Trait for contexts that want to be able to compute layouts of types.
2163 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2164 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2165     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2166     /// returned from `layout_of` (see also `handle_layout_err`).
2167     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2168
2169     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2170     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2171     #[inline]
2172     fn layout_tcx_at_span(&self) -> Span {
2173         DUMMY_SP
2174     }
2175
2176     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2177     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2178     ///
2179     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2180     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2181     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2182     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2183     fn handle_layout_err(
2184         &self,
2185         err: LayoutError<'tcx>,
2186         span: Span,
2187         ty: Ty<'tcx>,
2188     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2189 }
2190
2191 /// Blanket extension trait for contexts that can compute layouts of types.
2192 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2193     /// Computes the layout of a type. Note that this implicitly
2194     /// executes in "reveal all" mode, and will normalize the input type.
2195     #[inline]
2196     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2197         self.spanned_layout_of(ty, DUMMY_SP)
2198     }
2199
2200     /// Computes the layout of a type, at `span`. Note that this implicitly
2201     /// executes in "reveal all" mode, and will normalize the input type.
2202     // FIXME(eddyb) avoid passing information like this, and instead add more
2203     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2204     #[inline]
2205     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2206         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2207         let tcx = self.tcx().at(span);
2208
2209         MaybeResult::from(
2210             tcx.layout_of(self.param_env().and(ty))
2211                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2212         )
2213     }
2214 }
2215
2216 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
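
// Usage sketch (illustrative): with the blanket impl above, a context whose
// `LayoutOfResult` is `Result<TyAndLayout<'tcx>, LayoutError<'tcx>>` can write
// `let size = cx.layout_of(ty)?.size;`, while a codegen-style context can set
// `LayoutOfResult = TyAndLayout<'tcx>` and turn errors into fatal errors inside
// `handle_layout_err`, keeping call sites free of `Result` handling.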
2217
2218 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2219     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2220
2221     #[inline]
2222     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2223         err
2224     }
2225 }
2226
2227 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2228     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2229
2230     #[inline]
2231     fn layout_tcx_at_span(&self) -> Span {
2232         self.tcx.span
2233     }
2234
2235     #[inline]
2236     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2237         err
2238     }
2239 }
2240
2241 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2242 where
2243     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2244 {
2245     fn ty_and_layout_for_variant(
2246         this: TyAndLayout<'tcx>,
2247         cx: &C,
2248         variant_index: VariantIdx,
2249     ) -> TyAndLayout<'tcx> {
2250         let layout = match this.variants {
2251             Variants::Single { index }
2252                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2253                 if index == variant_index &&
2254                 // Don't confuse variants of uninhabited enums with the enum itself.
2255                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2256                 this.fields != FieldsShape::Primitive =>
2257             {
2258                 this.layout
2259             }
2260
2261             Variants::Single { index } => {
2262                 let tcx = cx.tcx();
2263                 let param_env = cx.param_env();
2264
2265                 // Deny calling for_variant more than once for non-Single enums.
2266                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2267                     assert_eq!(original_layout.variants, Variants::Single { index });
2268                 }
2269
2270                 let fields = match this.ty.kind() {
2271                     ty::Adt(def, _) if def.variants().is_empty() =>
2272                         bug!("for_variant called on zero-variant enum"),
2273                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2274                     _ => bug!(),
2275                 };
2276                 tcx.intern_layout(LayoutS {
2277                     variants: Variants::Single { index: variant_index },
2278                     fields: match NonZeroUsize::new(fields) {
2279                         Some(fields) => FieldsShape::Union(fields),
2280                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2281                     },
2282                     abi: Abi::Uninhabited,
2283                     largest_niche: None,
2284                     align: tcx.data_layout.i8_align,
2285                     size: Size::ZERO,
2286                 })
2287             }
2288
2289             Variants::Multiple { ref variants, .. } => variants[variant_index],
2290         };
2291
2292         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2293
2294         TyAndLayout { ty: this.ty, layout }
2295     }
2296
2297     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2298         enum TyMaybeWithLayout<'tcx> {
2299             Ty(Ty<'tcx>),
2300             TyAndLayout(TyAndLayout<'tcx>),
2301         }
2302
2303         fn field_ty_or_layout<'tcx>(
2304             this: TyAndLayout<'tcx>,
2305             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2306             i: usize,
2307         ) -> TyMaybeWithLayout<'tcx> {
2308             let tcx = cx.tcx();
2309             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2310                 TyAndLayout {
2311                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2312                     ty: tag.primitive().to_ty(tcx),
2313                 }
2314             };
2315
2316             match *this.ty.kind() {
2317                 ty::Bool
2318                 | ty::Char
2319                 | ty::Int(_)
2320                 | ty::Uint(_)
2321                 | ty::Float(_)
2322                 | ty::FnPtr(_)
2323                 | ty::Never
2324                 | ty::FnDef(..)
2325                 | ty::GeneratorWitness(..)
2326                 | ty::Foreign(..)
2327                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2328
2329                 // Potentially-fat pointers.
2330                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2331                     assert!(i < this.fields.count());
2332
2333                     // Reuse the fat `*T` type as its own thin pointer data field.
2334                     // This provides information about, e.g., DST struct pointees
2335                     // (which may have no non-DST form), and will work as long
2336                     // as the `Abi` or `FieldsShape` is checked by users.
2337                     if i == 0 {
2338                         let nil = tcx.mk_unit();
2339                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2340                             tcx.mk_mut_ptr(nil)
2341                         } else {
2342                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2343                         };
2344
2345                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2346                         // the `Result` should always work because the type is
2347                         // always either `*mut ()` or `&'static mut ()`.
2348                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2349                             ty: this.ty,
2350                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2351                         });
2352                     }
2353
2354                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2355                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2356                         ty::Dynamic(_, _) => {
2357                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2358                                 tcx.lifetimes.re_static,
2359                                 tcx.mk_array(tcx.types.usize, 3),
2360                             ))
2361                             /* FIXME: use actual fn pointers
2362                             Warning: naively computing the number of entries in the
2363                             vtable by counting the methods on the trait + methods on
2364                             all parent traits does not work, because some methods can
2365                             be not object safe and thus excluded from the vtable.
2366                             Increase this counter if you tried to implement this but
2367                             failed to do it without duplicating a lot of code from
2368                             other places in the compiler: 2
2369                             tcx.mk_tup(&[
2370                                 tcx.mk_array(tcx.types.usize, 3),
2371                                 tcx.mk_array(Option<fn()>),
2372                             ])
2373                             */
2374                         }
2375                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2376                     }
2377                 }
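                 // E.g. (illustrative) `&[u8]` is handled above as two fields:
                 // field 0 is the data pointer (given a thin-pointer layout) and
                 // field 1 is the `usize` length; for `&dyn Trait`, field 1 is
                 // instead the vtable pointer, modeled here as a reference to
                 // `[usize; 3]`.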
2378
2379                 // Arrays and slices.
2380                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2381                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2382
2383                 // Tuples, generators and closures.
2384                 ty::Closure(_, ref substs) => field_ty_or_layout(
2385                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2386                     cx,
2387                     i,
2388                 ),
2389
2390                 ty::Generator(def_id, ref substs, _) => match this.variants {
2391                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2392                         substs
2393                             .as_generator()
2394                             .state_tys(def_id, tcx)
2395                             .nth(index.as_usize())
2396                             .unwrap()
2397                             .nth(i)
2398                             .unwrap(),
2399                     ),
2400                     Variants::Multiple { tag, tag_field, .. } => {
2401                         if i == tag_field {
2402                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2403                         }
2404                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2405                     }
2406                 },
2407
2408                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2409
2410                 // ADTs.
2411                 ty::Adt(def, substs) => {
2412                     match this.variants {
2413                         Variants::Single { index } => {
2414                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2415                         }
2416
2417                         // Discriminant field for enums (where applicable).
2418                         Variants::Multiple { tag, .. } => {
2419                             assert_eq!(i, 0);
2420                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2421                         }
2422                     }
2423                 }
2424
2425                 ty::Projection(_)
2426                 | ty::Bound(..)
2427                 | ty::Placeholder(..)
2428                 | ty::Opaque(..)
2429                 | ty::Param(_)
2430                 | ty::Infer(_)
2431                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2432             }
2433         }
2434
2435         match field_ty_or_layout(this, cx, i) {
2436             TyMaybeWithLayout::Ty(field_ty) => {
2437                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2438                     bug!(
2439                         "failed to get layout for `{}`: {},\n\
2440                          despite it being a field (#{}) of an existing layout: {:#?}",
2441                         field_ty,
2442                         e,
2443                         i,
2444                         this
2445                     )
2446                 })
2447             }
2448             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2449         }
2450     }
2451
2452     fn ty_and_layout_pointee_info_at(
2453         this: TyAndLayout<'tcx>,
2454         cx: &C,
2455         offset: Size,
2456     ) -> Option<PointeeInfo> {
2457         let tcx = cx.tcx();
2458         let param_env = cx.param_env();
2459
2460         let addr_space_of_ty = |ty: Ty<'tcx>| {
2461             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2462         };
2463
2464         let pointee_info = match *this.ty.kind() {
2465             ty::RawPtr(mt) if offset.bytes() == 0 => {
2466                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2467                     size: layout.size,
2468                     align: layout.align.abi,
2469                     safe: None,
2470                     address_space: addr_space_of_ty(mt.ty),
2471                 })
2472             }
2473             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2474                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2475                     size: layout.size,
2476                     align: layout.align.abi,
2477                     safe: None,
2478                     address_space: cx.data_layout().instruction_address_space,
2479                 })
2480             }
2481             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2482                 let address_space = addr_space_of_ty(ty);
2483                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2484                     // Use conservative pointer kind if not optimizing. This saves us the
2485                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2486                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2487                     PointerKind::Shared
2488                 } else {
2489                     match mt {
2490                         hir::Mutability::Not => {
2491                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2492                                 PointerKind::Frozen
2493                             } else {
2494                                 PointerKind::Shared
2495                             }
2496                         }
2497                         hir::Mutability::Mut => {
2498                             // References to self-referential structures should not be considered
2499                             // noalias, because another pointer to the structure can be obtained
2500                             // that is not based on the original reference. We consider all !Unpin
2501                             // types to be potentially self-referential here.
2502                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2503                                 PointerKind::UniqueBorrowed
2504                             } else {
2505                                 PointerKind::Shared
2506                             }
2507                         }
2508                     }
2509                 };
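                 // Illustrative outcomes of the classification above (when
                 // optimizing): `&i32` is `Frozen`, `&Cell<i32>` is `Shared`
                 // (not `Freeze`), `&mut i32` is `UniqueBorrowed`, and
                 // `&mut (dyn Future<Output = ()>)` is `Shared` (trait objects
                 // are not known to be `Unpin`).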
2510
2511                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2512                     size: layout.size,
2513                     align: layout.align.abi,
2514                     safe: Some(kind),
2515                     address_space,
2516                 })
2517             }
2518
2519             _ => {
2520                 let mut data_variant = match this.variants {
2521                     // Within the discriminant field, only the niche itself is
2522                     // always initialized, so we only check for a pointer at its
2523                     // offset.
2524                     //
2525                     // If the niche is a pointer, it's either valid (according
2526                     // to its type), or null (which the niche field's scalar
2527                     // validity range encodes).  This allows using
2528                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2529                     // this will continue to work as long as we don't start
2530                     // using more niches than just null (e.g., the first page of
2531                     // the address space, or unaligned pointers).
2532                     Variants::Multiple {
2533                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2534                         tag_field,
2535                         ..
2536                     } if this.fields.offset(tag_field) == offset => {
2537                         Some(this.for_variant(cx, dataful_variant))
2538                     }
2539                     _ => Some(this),
2540                 };
2541
2542                 if let Some(variant) = data_variant {
2543                     // We're not interested in any unions.
2544                     if let FieldsShape::Union(_) = variant.fields {
2545                         data_variant = None;
2546                     }
2547                 }
2548
2549                 let mut result = None;
2550
2551                 if let Some(variant) = data_variant {
2552                     let ptr_end = offset + Pointer.size(cx);
2553                     for i in 0..variant.fields.count() {
2554                         let field_start = variant.fields.offset(i);
2555                         if field_start <= offset {
2556                             let field = variant.field(cx, i);
2557                             result = field.to_result().ok().and_then(|field| {
2558                                 if ptr_end <= field_start + field.size {
2559                                     // We found the right field, look inside it.
2560                                     field.pointee_info_at(cx, offset - field_start)
2561                                 } else {
2562                                     None
2563                                 }
2564                             });
2567                             if result.is_some() {
2568                                 break;
2569                             }
2570                         }
2571                     }
2572                 }
2573
2574                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2575                 if let Some(ref mut pointee) = result {
2576                     if let ty::Adt(def, _) = this.ty.kind() {
2577                         if def.is_box() && offset.bytes() == 0 {
2578                             pointee.safe = Some(PointerKind::UniqueOwned);
2579                         }
2580                     }
2581                 }
2582
2583                 result
2584             }
2585         };
2586
2587         debug!(
2588             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2589             offset,
2590             this.ty.kind(),
2591             pointee_info
2592         );
2593
2594         pointee_info
2595     }
2596
2597     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2598         matches!(this.ty.kind(), ty::Adt(..))
2599     }
2600
2601     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2602         matches!(this.ty.kind(), ty::Never)
2603     }
2604
2605     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2606         matches!(this.ty.kind(), ty::Tuple(..))
2607     }
2608
2609     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2610         matches!(this.ty.kind(), ty::Tuple(list) if list.is_empty())
2611     }
2612 }
2613
2614 impl<'tcx> ty::Instance<'tcx> {
2615     // NOTE(eddyb) this is private to avoid using it from outside of
2616     // `fn_abi_of_instance` - any other uses are either too high-level
2617     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2618     // or should go through `FnAbi` instead, to avoid losing any
2619     // adjustments `fn_abi_of_instance` might be performing.
2620     fn fn_sig_for_fn_abi(
2621         &self,
2622         tcx: TyCtxt<'tcx>,
2623         param_env: ty::ParamEnv<'tcx>,
2624     ) -> ty::PolyFnSig<'tcx> {
2625         let ty = self.ty(tcx, param_env);
2626         match *ty.kind() {
2627             ty::FnDef(..) => {
2628                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2629                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2630                 // (i.e. due to being inside a projection that got normalized, see
2631                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2632                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2633                 let mut sig = match *ty.kind() {
2634                     ty::FnDef(def_id, substs) => tcx
2635                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2636                         .subst(tcx, substs),
2637                     _ => unreachable!(),
2638                 };
2639
2640                 if let ty::InstanceDef::VtableShim(..) = self.def {
2641                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2642                     sig = sig.map_bound(|mut sig| {
2643                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2644                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2645                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2646                         sig
2647                     });
2648                 }
2649                 sig
2650             }
2651             ty::Closure(def_id, substs) => {
2652                 let sig = substs.as_closure().sig();
2653
2654                 let bound_vars = tcx.mk_bound_variable_kinds(
2655                     sig.bound_vars()
2656                         .iter()
2657                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2658                 );
2659                 let br = ty::BoundRegion {
2660                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2661                     kind: ty::BoundRegionKind::BrEnv,
2662                 };
2663                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2664                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
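                 // Illustratively, for an `FnMut` closure the signature built
                 // below is roughly
                 // `extern "rust-call" fn(&mut [closure env], (Args...)) -> Ret`;
                 // the environment is prepended as an extra first argument.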
2665
2666                 let sig = sig.skip_binder();
2667                 ty::Binder::bind_with_vars(
2668                     tcx.mk_fn_sig(
2669                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2670                         sig.output(),
2671                         sig.c_variadic,
2672                         sig.unsafety,
2673                         sig.abi,
2674                     ),
2675                     bound_vars,
2676                 )
2677             }
2678             ty::Generator(_, substs, _) => {
2679                 let sig = substs.as_generator().poly_sig();
2680
2681                 let bound_vars = tcx.mk_bound_variable_kinds(
2682                     sig.bound_vars()
2683                         .iter()
2684                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2685                 );
2686                 let br = ty::BoundRegion {
2687                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2688                     kind: ty::BoundRegionKind::BrEnv,
2689                 };
2690                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2691                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2692
2693                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2694                 let pin_adt_ref = tcx.adt_def(pin_did);
2695                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2696                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2697
2698                 let sig = sig.skip_binder();
2699                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2700                 let state_adt_ref = tcx.adt_def(state_did);
2701                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2702                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
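                 // Illustratively, the signature built below is roughly
                 // `fn(Pin<&mut [generator]>, [resume ty]) -> GeneratorState<[yield ty], [return ty]>`.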
2703                 ty::Binder::bind_with_vars(
2704                     tcx.mk_fn_sig(
2705                         [env_ty, sig.resume_ty].iter(),
2706                         &ret_ty,
2707                         false,
2708                         hir::Unsafety::Normal,
2709                         rustc_target::spec::abi::Abi::Rust,
2710                     ),
2711                     bound_vars,
2712                 )
2713             }
2714             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2715         }
2716     }
2717 }
2718
2719 /// Calculates whether a function's ABI can unwind or not.
2720 ///
2721 /// This takes two primary parameters:
2722 ///
2723 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2724 ///   codegen attrs for a defined function. For function pointers this set of
2725 ///   flags is the empty set. This is only applicable for Rust-defined
2726 ///   functions, and generally isn't needed except for small optimizations where
2727 ///   we try to say a function which otherwise might look like it could unwind
2728 ///   doesn't actually unwind (such as for intrinsics and such).
2729 ///
2730 /// * `abi` - this is the ABI that the function is defined with. This is the
2731 ///   primary factor for determining whether a function can unwind or not.
2732 ///
2733 /// Note that in this case unwinding does not necessarily mean panicking in Rust.
2734 /// Rust panics are implemented with unwinds on most platforms (when
2735 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2736 /// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
2737 /// specifically opted into in the name (e.g. `"C-unwind"`). What unwinding
2738 /// means is defined individually for each ABI, but it always corresponds to
2739 /// some form of stack-based unwinding (the exact mechanism of which varies
2740 /// platform-by-platform).
2741 ///
2742 /// Rust functions are classified as able or unable to unwind based on the
2743 /// active "panic strategy". In other words, Rust functions are considered to
2744 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2745 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2746 /// only if the final panic mode is panic=abort. In this scenario any code
2747 /// previously compiled assuming that a function can unwind is still correct; it
2748 /// just never happens to actually unwind at runtime.
2749 ///
2750 /// This function's answer to whether or not a function can unwind is quite
2751 /// impactful throughout the compiler. This affects things like:
2752 ///
2753 /// * Calling a function which can't unwind means codegen simply ignores any
2754 ///   associated unwinding cleanup.
2755 /// * Calling a function which can unwind from a function which can't unwind
2756 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2757 ///   aborts the process.
2758 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2759 ///   affects various optimizations and codegen.
2760 ///
2761 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2762 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2763 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2764 /// might (from a foreign exception or similar).
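     ///
     /// Illustrative expectations under `-Cpanic=unwind` without
     /// `#![feature(c_unwind)]` (sketch only, not a doctest; assumes a `tcx`):
     ///
     /// ```ignore (illustrative)
     /// assert!(fn_can_unwind(tcx, None, SpecAbi::Rust));
     /// assert!(fn_can_unwind(tcx, None, SpecAbi::C { unwind: false })); // see caveat above
     /// assert!(!fn_can_unwind(tcx, None, SpecAbi::PtxKernel)); // can never unwind
     /// ```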
2765 #[inline]
2766 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2767     if let Some(did) = fn_def_id {
2768         // Special attribute for functions which can't unwind.
2769         if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2770             return false;
2771         }
2772
2773         // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2774         //
2775         // This is not part of `codegen_fn_attrs` as it can differ between crates
2776         // and therefore cannot be computed in core.
2777         if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
2778             if Some(did) == tcx.lang_items().drop_in_place_fn() {
2779                 return false;
2780             }
2781         }
2782     }
2783
2784     // Otherwise, if this isn't special, then unwinding is generally determined by
2785     // the ABI of the function itself. ABIs like `C` have variants which also
2786     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2787     // ABIs have such an option. Otherwise the only other thing here is Rust
2788     // itself, and those ABIs are determined by the panic strategy configured
2789     // for this compilation.
2790     //
2791     // Unfortunately at this time there's also another caveat. Rust [RFC
2792     // 2945][rfc] has been accepted and is in the process of being implemented
2793     // and stabilized. In this interim state we need to deal with historical
2794     // rustc behavior as well as plan for future rustc behavior.
2795     //
2796     // Historically functions declared with `extern "C"` were marked at the
2797     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2798     // or not. This is UB for functions in `panic=unwind` mode that then
2799     // actually panic and unwind. Note that this behavior is true for both
2800     // externally declared functions as well as Rust-defined function.
2801     // externally declared functions as well as Rust-defined functions.
2802     // To fix this UB rustc would like to change in the future to catch unwinds
2803     // from function calls that may unwind within a Rust-defined `extern "C"`
2804     // function and forcibly abort the process, thereby respecting the
2805     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2806     // ready to roll out, so determining whether or not the `C` family of ABIs
2807     // unwinds is conditional not only on their definition but also on whether the
2808     // `#![feature(c_unwind)]` feature gate is active.
2809     //
2810     // Note that this means that, unlike historical compilers, rustc now by
2811     // default unconditionally thinks that the `C` ABI may unwind. This will
2812     // prevent some optimization opportunities, however, so we try to scope this
2813     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2814     // to `panic=abort`).
2815     //
2816     // Eventually the check against `c_unwind` here will ideally get removed and
2817     // this'll be a little cleaner as it'll be a straightforward check of the
2818     // ABI.
2819     //
2820     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2821     use SpecAbi::*;
2822     match abi {
2823         C { unwind }
2824         | System { unwind }
2825         | Cdecl { unwind }
2826         | Stdcall { unwind }
2827         | Fastcall { unwind }
2828         | Vectorcall { unwind }
2829         | Thiscall { unwind }
2830         | Aapcs { unwind }
2831         | Win64 { unwind }
2832         | SysV64 { unwind } => {
2833             unwind
2834                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2835         }
2836         PtxKernel
2837         | Msp430Interrupt
2838         | X86Interrupt
2839         | AmdGpuKernel
2840         | EfiApi
2841         | AvrInterrupt
2842         | AvrNonBlockingInterrupt
2843         | CCmseNonSecureCall
2844         | Wasm
2845         | RustIntrinsic
2846         | PlatformIntrinsic
2847         | Unadjusted => false,
2848         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2849     }
2850 }
2851
2852 #[inline]
2853 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2854     use rustc_target::spec::abi::Abi::*;
2855     match tcx.sess.target.adjust_abi(abi) {
2856         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2857
2858         // It's the ABI's job to select this, not ours.
2859         System { .. } => bug!("system abi should be selected elsewhere"),
2860         EfiApi => bug!("eficall abi should be selected elsewhere"),
2861
2862         Stdcall { .. } => Conv::X86Stdcall,
2863         Fastcall { .. } => Conv::X86Fastcall,
2864         Vectorcall { .. } => Conv::X86VectorCall,
2865         Thiscall { .. } => Conv::X86ThisCall,
2866         C { .. } => Conv::C,
2867         Unadjusted => Conv::C,
2868         Win64 { .. } => Conv::X86_64Win64,
2869         SysV64 { .. } => Conv::X86_64SysV,
2870         Aapcs { .. } => Conv::ArmAapcs,
2871         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2872         PtxKernel => Conv::PtxKernel,
2873         Msp430Interrupt => Conv::Msp430Intr,
2874         X86Interrupt => Conv::X86Intr,
2875         AmdGpuKernel => Conv::AmdGpuKernel,
2876         AvrInterrupt => Conv::AvrInterrupt,
2877         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2878         Wasm => Conv::C,
2879
2880         // These API constants ought to be more specific...
2881         Cdecl { .. } => Conv::C,
2882     }
2883 }
2884
2885 /// Error produced by attempting to compute or adjust a `FnAbi`.
2886 #[derive(Copy, Clone, Debug, HashStable)]
2887 pub enum FnAbiError<'tcx> {
2888     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2889     Layout(LayoutError<'tcx>),
2890
2891     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2892     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2893 }
2894
2895 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2896     fn from(err: LayoutError<'tcx>) -> Self {
2897         Self::Layout(err)
2898     }
2899 }
2900
2901 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2902     fn from(err: call::AdjustForForeignAbiError) -> Self {
2903         Self::AdjustForForeignAbi(err)
2904     }
2905 }
2906
2907 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2908     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2909         match self {
2910             Self::Layout(err) => err.fmt(f),
2911             Self::AdjustForForeignAbi(err) => err.fmt(f),
2912         }
2913     }
2914 }
2915
2916 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
2917 // just for error handling.
2918 #[derive(Debug)]
2919 pub enum FnAbiRequest<'tcx> {
2920     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2921     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2922 }
2923
2924 /// Trait for contexts that want to be able to compute `FnAbi`s.
2925 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2926 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2927     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2928     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2929     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2930
2931     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2932     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2933     ///
2934     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2935     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2936     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2937     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2938     fn handle_fn_abi_err(
2939         &self,
2940         err: FnAbiError<'tcx>,
2941         span: Span,
2942         fn_abi_request: FnAbiRequest<'tcx>,
2943     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2944 }
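     // A minimal sketch of an infallible `impl` in the spirit described above
     // (`MyCodegenCx` is hypothetical). Choosing `&FnAbi` itself as the result
     // type means `Error = !`, so every error must be made fatal here:
     //
     //     impl<'tcx> FnAbiOfHelpers<'tcx> for MyCodegenCx<'tcx> {
     //         type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
     //
     //         fn handle_fn_abi_err(
     //             &self,
     //             err: FnAbiError<'tcx>,
     //             span: Span,
     //             fn_abi_request: FnAbiRequest<'tcx>,
     //         ) -> ! {
     //             span_bug!(span, "failed to get fn_abi: {} ({:?})", err, fn_abi_request)
     //         }
     //     }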
2945
2946 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2947 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2948     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2949     ///
2950     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2951     /// instead, where the instance is an `InstanceDef::Virtual`.
2952     #[inline]
2953     fn fn_abi_of_fn_ptr(
2954         &self,
2955         sig: ty::PolyFnSig<'tcx>,
2956         extra_args: &'tcx ty::List<Ty<'tcx>>,
2957     ) -> Self::FnAbiOfResult {
2958         // FIXME(eddyb) get a better `span` here.
2959         let span = self.layout_tcx_at_span();
2960         let tcx = self.tcx().at(span);
2961
2962         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2963             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2964         ))
2965     }
2966
2967     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2968     /// direct calls to an `fn`.
2969     ///
2970     /// NB: that includes virtual calls, which are represented by "direct calls"
2971     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2972     #[inline]
2973     fn fn_abi_of_instance(
2974         &self,
2975         instance: ty::Instance<'tcx>,
2976         extra_args: &'tcx ty::List<Ty<'tcx>>,
2977     ) -> Self::FnAbiOfResult {
2978         // FIXME(eddyb) get a better `span` here.
2979         let span = self.layout_tcx_at_span();
2980         let tcx = self.tcx().at(span);
2981
2982         MaybeResult::from(
2983             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2984                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2985                 // we can get some kind of span even if one wasn't provided.
2986                 // However, we don't do this early in order to avoid calling
2987                 // `def_span` unconditionally (which may have a perf penalty).
2988                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2989                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2990             }),
2991         )
2992     }
2993 }
2994
2995 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
2996
2997 fn fn_abi_of_fn_ptr<'tcx>(
2998     tcx: TyCtxt<'tcx>,
2999     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3000 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3001     let (param_env, (sig, extra_args)) = query.into_parts();
3002
3003     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3004 }
3005
3006 fn fn_abi_of_instance<'tcx>(
3007     tcx: TyCtxt<'tcx>,
3008     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3009 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3010     let (param_env, (instance, extra_args)) = query.into_parts();
3011
3012     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3013
3014     let caller_location = if instance.def.requires_caller_location(tcx) {
3015         Some(tcx.caller_location_ty())
3016     } else {
3017         None
3018     };
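         // Illustratively, a `#[track_caller] fn f(x: u32)` instance thus gets
         // an implicit trailing `&'static core::panic::Location<'static>`
         // argument appended by `fn_abi_new_uncached` below.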
3019
3020     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3021         sig,
3022         extra_args,
3023         caller_location,
3024         Some(instance.def_id()),
3025         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3026     )
3027 }
3028
3029 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3030     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3031     // arguments of this method, into a separate `struct`.
3032     fn fn_abi_new_uncached(
3033         &self,
3034         sig: ty::PolyFnSig<'tcx>,
3035         extra_args: &[Ty<'tcx>],
3036         caller_location: Option<Ty<'tcx>>,
3037         fn_def_id: Option<DefId>,
3038         // FIXME(eddyb) replace this with something typed, like an `enum`.
3039         force_thin_self_ptr: bool,
3040     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3041         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3042
3043         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3044
3045         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3046
3047         let mut inputs = sig.inputs();
3048         let extra_args = if sig.abi == RustCall {
3049             assert!(!sig.c_variadic && extra_args.is_empty());
3050
3051             if let Some(input) = sig.inputs().last() {
3052                 if let ty::Tuple(tupled_arguments) = input.kind() {
3053                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3054                     tupled_arguments
3055                 } else {
3056                     bug!(
3057                         "argument to function with \"rust-call\" ABI \
3058                             is not a tuple"
3059                     );
3060                 }
3061             } else {
3062                 bug!(
3063                     "function with \"rust-call\" ABI \
3064                         has no tuple argument to untuple"
3065                 );
3066             }
3067         } else {
3068             assert!(sig.c_variadic || extra_args.is_empty());
3069             extra_args
3070         };
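          // Illustratively, for `extern "rust-call" fn call(self, args: (A, B))`,
          // `inputs` is now `[self]` and `extra_args` is `[A, B]`, so the ABI
          // computed below is that of `fn(self, A, B)`.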
3071
3072         let target = &self.tcx.sess.target;
3073         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3074         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3075         let linux_s390x_gnu_like =
3076             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3077         let linux_sparc64_gnu_like =
3078             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3079         let linux_powerpc_gnu_like =
3080             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3081         use SpecAbi::*;
3082         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3083
3084         // Handle safe Rust thin and fat pointers.
3085         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3086                                       scalar: Scalar,
3087                                       layout: TyAndLayout<'tcx>,
3088                                       offset: Size,
3089                                       is_return: bool| {
3090             // Booleans are always a noundef i1 that needs to be zero-extended.
3091             if scalar.is_bool() {
3092                 attrs.ext(ArgExtension::Zext);
3093                 attrs.set(ArgAttribute::NoUndef);
3094                 return;
3095             }
3096
3097             // Scalars which have invalid values cannot be undef.
3098             if !scalar.is_always_valid(self) {
3099                 attrs.set(ArgAttribute::NoUndef);
3100             }
3101
3102             // Only pointer types are handled below.
3103             let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3104
3105             if !valid_range.contains(0) {
3106                 attrs.set(ArgAttribute::NonNull);
3107             }
3108
3109             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3110                 if let Some(kind) = pointee.safe {
3111                     attrs.pointee_align = Some(pointee.align);
3112
3113                     // `Box` pointers (`PointerKind::UniqueOwned`) are not necessarily
3114                     // dereferenceable for the entire duration of the function, as the
3115                     // box can be deallocated at any time. Set their valid size to 0.
3116                     attrs.pointee_size = match kind {
3117                         PointerKind::UniqueOwned => Size::ZERO,
3118                         _ => pointee.size,
3119                     };
3120
3121                     // `Box`, `&T`, and `&mut T` cannot be undef.
3122                     // Note that this only applies to the value of the pointer itself;
3123                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3124                     attrs.set(ArgAttribute::NoUndef);
3125
3126                     // `Box` pointer parameters never alias because ownership is
3127                     // transferred. `&mut` pointer parameters never alias other
3128                     // parameters or mutable global data.
3129                     //
3130                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can
3131                     // be marked as both `readonly` and `noalias`, as LLVM's definition
3132                     // of `noalias` is based solely on memory dependencies rather than
3133                     // pointer equality.
3134                     //
3135                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3136                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3137                     // or not to actually emit the attribute. It can also be controlled with the
3138                     // `-Zmutable-noalias` debugging option.
3139                     let no_alias = match kind {
3140                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3141                         PointerKind::UniqueOwned => true,
3142                         PointerKind::Frozen => !is_return,
3143                     };
3144                     if no_alias {
3145                         attrs.set(ArgAttribute::NoAlias);
3146                     }
3147
3148                     if kind == PointerKind::Frozen && !is_return {
3149                         attrs.set(ArgAttribute::ReadOnly);
3150                     }
3151
3152                     if kind == PointerKind::UniqueBorrowed && !is_return {
3153                         attrs.set(ArgAttribute::NoAliasMutRef);
3154                     }
3155                 }
3156             }
3157         };
3158
3159         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3160             let is_return = arg_idx.is_none();
3161
3162             let layout = self.layout_of(ty)?;
3163             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3164                 // Don't pass the vtable, it's not an argument of the virtual fn.
3165                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3166                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3167                 make_thin_self_ptr(self, layout)
3168             } else {
3169                 layout
3170             };
3171
3172             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3173                 let mut attrs = ArgAttributes::new();
3174                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3175                 attrs
3176             });
3177
3178             if arg.layout.is_zst() {
3179                 // For some forsaken reason, x86_64-pc-windows-gnu
3180                 // doesn't ignore zero-sized struct arguments.
3181                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3182                 if is_return
3183                     || rust_abi
3184                     || (!win_x64_gnu
3185                         && !linux_s390x_gnu_like
3186                         && !linux_sparc64_gnu_like
3187                         && !linux_powerpc_gnu_like)
3188                 {
3189                     arg.mode = PassMode::Ignore;
3190                 }
3191             }
3192
3193             Ok(arg)
3194         };
3195
3196         let mut fn_abi = FnAbi {
3197             ret: arg_of(sig.output(), None)?,
3198             args: inputs
3199                 .iter()
3200                 .copied()
3201                 .chain(extra_args.iter().copied())
3202                 .chain(caller_location)
3203                 .enumerate()
3204                 .map(|(i, ty)| arg_of(ty, Some(i)))
3205                 .collect::<Result<_, _>>()?,
3206             c_variadic: sig.c_variadic,
3207             fixed_count: inputs.len(),
3208             conv,
3209             can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3210         };
3211         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3212         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3213         Ok(self.tcx.arena.alloc(fn_abi))
3214     }
3215
3216     fn fn_abi_adjust_for_abi(
3217         &self,
3218         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3219         abi: SpecAbi,
3220     ) -> Result<(), FnAbiError<'tcx>> {
3221         if abi == SpecAbi::Unadjusted {
3222             return Ok(());
3223         }
3224
3225         if abi == SpecAbi::Rust
3226             || abi == SpecAbi::RustCall
3227             || abi == SpecAbi::RustIntrinsic
3228             || abi == SpecAbi::PlatformIntrinsic
3229         {
3230             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3231                 if arg.is_ignore() {
3232                     return;
3233                 }
3234
3235                 match arg.layout.abi {
3236                     Abi::Aggregate { .. } => {}
3237
3238                     // This is a fun case! The gist of what this is doing is
3239                     // that we want callers and callees to always agree on the
3240                     // ABI of how they pass SIMD arguments. If we were to *not*
3241                     // make these arguments indirect then they'd be immediates
3242                     // in LLVM, which means that they'd use whatever the
3243                     // appropriate ABI is for the callee and the caller. That
3244                     // means, for example, if the caller doesn't have AVX
3245                     // enabled but the callee does, then passing an AVX argument
3246                     // across this boundary would cause corrupt data to show up.
3247                     //
3248                     // This problem is fixed by unconditionally passing SIMD
3249                     // arguments through memory between callers and callees
3250                     // which should get them all to agree on ABI regardless of
3251                     // target feature sets. Some more information about this
3252                     // issue can be found in #44367.
3253                     //
3254                     // Note that the platform intrinsic ABI is exempt here as
3255                     // that's how we connect up to LLVM and it's unstable
3256                     // anyway; we control all calls to it in libstd.
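                     // Illustratively: if a caller built without AVX passed a
                     // 256-bit `__m256` immediate to a callee built with AVX,
                     // the two sides would disagree on which registers carry
                     // it; routing it through memory avoids that mismatch.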
3257                     Abi::Vector { .. }
3258                         if abi != SpecAbi::PlatformIntrinsic
3259                             && self.tcx.sess.target.simd_types_indirect =>
3260                     {
3261                         arg.make_indirect();
3262                         return;
3263                     }
3264
3265                     _ => return,
3266                 }
3267
3268                 let size = arg.layout.size;
3269                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3270                     arg.make_indirect();
3271                 } else {
3272                     // We want to pass small aggregates as immediates, but using
3273                     // an LLVM aggregate type for this leads to bad optimizations,
3274                     // so we pick an appropriately sized integer type instead.
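                     // Illustratively, a `(u8, u16)` aggregate (4 bytes with
                     // padding) is passed as one `i32`-sized integer rather
                     // than as an LLVM `{ i8, i16 }` aggregate.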
3275                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3276                 }
3277             };
3278             fixup(&mut fn_abi.ret);
3279             for arg in &mut fn_abi.args {
3280                 fixup(arg);
3281             }
3282         } else {
3283             fn_abi.adjust_for_foreign_abi(self, abi)?;
3284         }
3285
3286         Ok(())
3287     }
3288 }
3289
3290 fn make_thin_self_ptr<'tcx>(
3291     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3292     layout: TyAndLayout<'tcx>,
3293 ) -> TyAndLayout<'tcx> {
3294     let tcx = cx.tcx();
3295     let fat_pointer_ty = if layout.is_unsized() {
3296         // unsized `self` is passed as a pointer to `self`
3297         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3298         tcx.mk_mut_ptr(layout.ty)
3299     } else {
3300         match layout.abi {
3301             Abi::ScalarPair(..) => (),
3302             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3303         }
3304
3305         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3306         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3307         // elsewhere in the compiler as a method on a `dyn Trait`.
3308         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3309         // get a built-in pointer type
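                 // Illustratively: `Rc<dyn Trait>` descends through its
                 // `NonNull<RcBox<dyn Trait>>` field to the raw
                 // `*const RcBox<dyn Trait>` inside, where the loop stops.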
3310         let mut fat_pointer_layout = layout;
3311         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3312             && !fat_pointer_layout.ty.is_region_ptr()
3313         {
3314             for i in 0..fat_pointer_layout.fields.count() {
3315                 let field_layout = fat_pointer_layout.field(cx, i);
3316
3317                 if !field_layout.is_zst() {
3318                     fat_pointer_layout = field_layout;
3319                     continue 'descend_newtypes;
3320                 }
3321             }
3322
3323             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3324         }
3325
3326         fat_pointer_layout.ty
3327     };
3328
3329     // we now have a type like `*mut RcBox<dyn Trait>`
3330     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3331     // this is understood as a special case elsewhere in the compiler
3332     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3333
3334     TyAndLayout {
3335         ty: fat_pointer_ty,
3336
3337         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3338         // should always work because the type is always `*mut ()`.
3339         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3340     }
3341 }