// compiler/rustc_middle/src/ty/layout.rs
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::intern::Interned;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
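    ///
    /// For example, with the default `repr(Rust)` (so `at_least == I8`), a
    /// discriminant range of `-1..=100` yields `(I8, true)`, while `0..=300`
    /// yields `(I16, false)`.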
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
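        // E.g. `min == -1` casts to `u128::MAX`, which only `I128` fits, and
        // `I128` covers every `i128` value anyway.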
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, but it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none.
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible.
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
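
// For example, for `&[u8]` the field at `FAT_PTR_ADDR` is the data pointer and
// the field at `FAT_PTR_EXTRA` is the length, while for `&dyn Trait` the second
// field is the vtable pointer.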

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
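///
/// A 4-bit field can express a base-2 log of at most 15, hence the cap of
/// `1 << 0xF` (i.e. 2^15 = 32768) lanes.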
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
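// For example, `invert_mapping(&[2, 0, 1])` is `[1, 2, 0]`: the forward map
// sends 0 -> 2, 1 -> 0 and 2 -> 1, so the inverse sends 2 -> 0, 0 -> 1 and 1 -> 2.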
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
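        // E.g. for `(a, b) == (I8, I32)` scalars on a target where `I32` is
        // 4-byte-aligned: `b_offset == Size(1).align_to(4) == 4` and
        // `size == (4 + 4).align_to(align.abi) == 8`.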

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
            // the field ordering to try to catch code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use
                // to randomize field ordering with.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields.
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
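        // E.g. `inverse_memory_index == [2, 0, 1]` means field 2 is first in
        // memory; inverting it gives `memory_index == [1, 2, 0]`, i.e. field 0
        // lands in memory slot 1.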

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then inverse_memory_index[0] is 5,
        // and memory_index[5] should be 0, i.e. memory_index[inverse_memory_index[i]] = i.
        // Note: if we didn't optimize, inverse_memory_index is the identity
        // permutation, which is its own inverse, so no inversion is needed.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((
                        i,
                        &TyAndLayout {
                            layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(a), .. }, _)),
                            ..
                        },
                    )),
                    Some((
                        j,
                        &TyAndLayout {
                            layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(b), .. }, _)),
                            ..
                        },
                    )),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a, b);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = ptr.valid_range.with_start(1);
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = data_ptr.valid_range.with_start(1);
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = vtable.valid_range.with_start(1);
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
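                // E.g. `&[T]` and `&str` pair the data pointer with a usize
                // length, while `&dyn Trait` pairs it with a non-null vtable pointer.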
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let Ok(TyAndLayout {
                        layout: Layout(Interned(LayoutS { fields: FieldsShape::Array { count, .. }, .. }, _)),
                        ..
                    }) = self.layout_of(f0_ty) else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15), a fixed
                // maximum that backends must support.
                //
                // This can't be caught by typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
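                // E.g. a variant holding only `!` is absent (uninhabited and
                // all-ZST), while one holding `(!, u32)` is uninhabited yet still
                // present, since `u32` is not a ZST.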
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.start <= start);
                                scalar.valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.end >= end);
                                scalar.valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
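                // E.g. `Option<&T>` has one dataful variant (`Some`) and uses the
                // null-pointer niche of `&T` to encode `None`, so no separate tag
                // field is needed.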
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
                                        } else {
                                            Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
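                        // E.g. with `bits == 8`, a raw discriminant of 0xFF
                        // round-trips through the shifts to -1.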
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

1205                 // repr(C) on an enum tells us to make a (tag, union) layout,
1206                 // so we need to grow the prefix alignment to be at least
1207                 // the alignment of the union. (This value is used both for
1208                 // determining the alignment of the overall enum, and for
1209                 // determining the alignment of the payload after the tag.)
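                     // (illustrative: for `#[repr(C)] enum E { A(u8), B(u64) }` the
                     // `u64` payload raises the prefix alignment to 8 bytes, so the
                     // payload lands at an 8-aligned offset after the tag)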
1210                 let mut prefix_align = min_ity.align(dl).abi;
1211                 if def.repr().c() {
1212                     for fields in &variants {
1213                         for field in fields {
1214                             prefix_align = prefix_align.max(field.align.abi);
1215                         }
1216                     }
1217                 }
1218
1219                 // Create the set of structs that represent each variant.
1220                 let mut layout_variants = variants
1221                     .iter_enumerated()
1222                     .map(|(i, field_layouts)| {
1223                         let mut st = self.univariant_uninterned(
1224                             ty,
1225                             &field_layouts,
1226                             &def.repr(),
1227                             StructKind::Prefixed(min_ity.size(), prefix_align),
1228                         )?;
1229                         st.variants = Variants::Single { index: i };
1230                         // Find the first field we can't move later
1231                         // to make room for a larger discriminant.
1232                         for field in
1233                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1234                         {
1235                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1236                                 start_align = start_align.min(field.align.abi);
1237                                 break;
1238                             }
1239                         }
1240                         size = cmp::max(size, st.size);
1241                         align = align.max(st.align);
1242                         Ok(st)
1243                     })
1244                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1245
1246                 // Align the maximum variant size to the largest alignment.
1247                 size = size.align_to(align.abi);
1248
1249                 if size.bytes() >= dl.obj_size_bound() {
1250                     return Err(LayoutError::SizeOverflow(ty));
1251                 }
1252
1253                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1254                 if typeck_ity < min_ity {
1255                     // It is a bug if layout decided on a discriminant size greater
1256                     // than what typeck computed at this point (based on the values the
1257                     // discriminant can take on), because the discriminant will be
1258                     // loaded and then stored into a variable of the type computed by
1259                     // typeck. Consider the buggy case: typeck decided on a byte-sized
1260                     // discriminant, but layout thinks we need 16 bits to store all the
1261                     // discriminant values. In codegen, storing that 16-bit discriminant
1262                     // into an 8-bit temporary would have to discard part of the value
1263                     // (or layout is simply wrong in thinking it needs 16 bits).
1264                     bug!(
1265                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1266                         min_ity,
1267                         typeck_ity
1268                     );
1269                     // However, it is fine to make the discr type however large (as an optimisation)
1270                     // after this point – we'll just truncate the value we load in codegen.
1271                 }
1272
1273                 // Check to see if we should use a different type for the
1274                 // discriminant. We can safely use a type with the same size
1275                 // as the alignment of the first field of each variant.
1276                 // We increase the size of the discriminant to avoid LLVM copying
1277                 // padding when it doesn't need to; that padding copying normally
1278                 // causes unaligned loads/stores and excessive memcpy/memset
1279                 // operations. By using a bigger integer size, LLVM can be sure
1280                 // about its contents and won't be so conservative.
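                     // (illustrative: for `enum E { A, B(u64) }` the smallest tag is a
                     // single byte, but every variant's data starts 8-aligned anyway, so
                     // the tag can be widened to 8 bytes without growing the enum)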
1281
1282                 // Use the initial field alignment
1283                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1284                     min_ity
1285                 } else {
1286                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1287                 };
1288
1289                 // If the alignment-derived integer is no larger than the minimal
1290                 // discriminant size, keep the minimal size as the final one.
1291                 if ity <= min_ity {
1292                     ity = min_ity;
1293                 } else {
1294                     // Patch up the variants' first few fields.
1295                     let old_ity_size = min_ity.size();
1296                     let new_ity_size = ity.size();
1297                     for variant in &mut layout_variants {
1298                         match variant.fields {
1299                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1300                                 for i in offsets {
1301                                     if *i <= old_ity_size {
1302                                         assert_eq!(*i, old_ity_size);
1303                                         *i = new_ity_size;
1304                                     }
1305                                 }
1306                                 // We might be making the struct larger.
1307                                 if variant.size <= old_ity_size {
1308                                     variant.size = new_ity_size;
1309                                 }
1310                             }
1311                             _ => bug!(),
1312                         }
1313                     }
1314                 }
1315
1316                 let tag_mask = ity.size().unsigned_int_max();
1317                 let tag = Scalar {
1318                     value: Int(ity, signed),
1319                     valid_range: WrappingRange {
1320                         start: (min as u128 & tag_mask),
1321                         end: (max as u128 & tag_mask),
1322                     },
1323                 };
1324                 let mut abi = Abi::Aggregate { sized: true };
1325
1326                 // Without the latter check, aligned enums with custom discriminant
1327                 // values would result in an ICE; see issue #92464 for more info.
1328                 if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1329                     abi = Abi::Scalar(tag);
1330                 } else {
1331                     // Try to use a ScalarPair for all tagged enums.
1332                     let mut common_prim = None;
1333                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1334                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1335                             bug!();
1336                         };
1337                         let mut fields =
1338                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1339                         let (field, offset) = match (fields.next(), fields.next()) {
1340                             (None, None) => continue,
1341                             (Some(pair), None) => pair,
1342                             _ => {
1343                                 common_prim = None;
1344                                 break;
1345                             }
1346                         };
1347                         let prim = match field.abi {
1348                             Abi::Scalar(scalar) => scalar.value,
1349                             _ => {
1350                                 common_prim = None;
1351                                 break;
1352                             }
1353                         };
1354                         if let Some(pair) = common_prim {
1355                             // This is pretty conservative. We could go fancier
1356                             // by conflating things like i32 and u32, or even
1357                             // realising that (u8, u8) could just cohabit with
1358                             // u16 or even u32.
1359                             if pair != (prim, offset) {
1360                                 common_prim = None;
1361                                 break;
1362                             }
1363                         } else {
1364                             common_prim = Some((prim, offset));
1365                         }
1366                     }
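                         // (illustrative: in `Option<u32>` every inhabited variant is a
                         // single `u32` at the same offset, so `common_prim` survives the
                         // loop and the enum can become `ScalarPair(tag, u32)`)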
1367                     if let Some((prim, offset)) = common_prim {
1368                         let pair = self.scalar_pair(tag, scalar_unit(prim));
1369                         let pair_offsets = match pair.fields {
1370                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1371                                 assert_eq!(memory_index, &[0, 1]);
1372                                 offsets
1373                             }
1374                             _ => bug!(),
1375                         };
1376                         if pair_offsets[0] == Size::ZERO
1377                             && pair_offsets[1] == *offset
1378                             && align == pair.align
1379                             && size == pair.size
1380                         {
1381                             // We can use `ScalarPair` only when it matches our
1382                             // already computed layout (including `#[repr(C)]`).
1383                             abi = pair.abi;
1384                         }
1385                     }
1386                 }
1387
1388                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1389                     abi = Abi::Uninhabited;
1390                 }
1391
1392                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1393
1394                 let layout_variants =
1395                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1396
1397                 let tagged_layout = LayoutS {
1398                     variants: Variants::Multiple {
1399                         tag,
1400                         tag_encoding: TagEncoding::Direct,
1401                         tag_field: 0,
1402                         variants: layout_variants,
1403                     },
1404                     fields: FieldsShape::Arbitrary {
1405                         offsets: vec![Size::ZERO],
1406                         memory_index: vec![0],
1407                     },
1408                     largest_niche,
1409                     abi,
1410                     align,
1411                     size,
1412                 };
1413
1414                 let best_layout = match (tagged_layout, niche_filling_layout) {
1415                     (tagged_layout, Some(niche_filling_layout)) => {
1416                         // Pick the smaller layout; otherwise,
1417                         // pick the layout with the larger niche; otherwise,
1418                         // pick tagged as it has simpler codegen.
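                             // (illustrative: for `Option<&u8>` the niche-filling candidate
                             // is pointer-sized while the tagged one needs an extra tag
                             // word, so the niche-filling layout is chosen)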
1419                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1420                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1421                             (layout.size, cmp::Reverse(niche_size))
1422                         })
1423                     }
1424                     (tagged_layout, None) => tagged_layout,
1425                 };
1426
1427                 tcx.intern_layout(best_layout)
1428             }
1429
1430             // Types with no meaningful known layout.
1431             ty::Projection(_) | ty::Opaque(..) => {
1432                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1433                 // if that was possible, so there's no reason to try again here.
1434                 return Err(LayoutError::Unknown(ty));
1435             }
1436
1437             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1438                 bug!("Layout::compute: unexpected type `{}`", ty)
1439             }
1440
1441             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1442                 return Err(LayoutError::Unknown(ty));
1443             }
1444         })
1445     }
1446 }
1447
1448 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1449 #[derive(Clone, Debug, PartialEq)]
1450 enum SavedLocalEligibility {
1451     Unassigned,
1452     Assigned(VariantIdx),
1453     // FIXME: Use newtype_index so we aren't wasting bytes
1454     Ineligible(Option<u32>),
1455 }
1456
1457 // When laying out generators, we divide our saved local fields into two
1458 // categories: overlap-eligible and overlap-ineligible.
1459 //
1460 // Those fields which are ineligible for overlap go in a "prefix" at the
1461 // beginning of the layout, and always have space reserved for them.
1462 //
1463 // Overlap-eligible fields are only assigned to one variant, so we lay
1464 // those fields out for each variant and put them right after the
1465 // prefix.
1466 //
1467 // Finally, in the layout details, we point to the fields from the
1468 // variants they are assigned to. It is possible for some fields to be
1469 // included in multiple variants. No field ever "moves around" in the
1470 // layout; its offset is always the same.
1471 //
1472 // Also included in the layout are the upvars and the discriminant.
1473 // These are included as fields on the "outer" layout; they are not part
1474 // of any variant.
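     // An illustrative sketch (hypothetical source, not a real test): in a
     // generator body like
     //
     //     let a = String::new();
     //     yield;          // `a` is saved only in the first suspend variant
     //     drop(a);
     //     let b = String::new();
     //     yield;          // `b` is saved only in the second suspend variant
     //     drop(b);
     //
     // `a` and `b` are each assigned to a single variant and are never
     // storage-live at the same time, so they remain overlap-eligible and
     // can share the same offset after the prefix.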
1475 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1476     /// Compute the eligibility and assignment of each local.
1477     fn generator_saved_local_eligibility(
1478         &self,
1479         info: &GeneratorLayout<'tcx>,
1480     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1481         use SavedLocalEligibility::*;
1482
1483         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1484             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1485
1486         // The saved locals not eligible for overlap. These will get
1487         // "promoted" to the prefix of our generator.
1488         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1489
1490         // Figure out which of our saved locals are fields in only
1491         // one variant. The rest are deemed ineligible for overlap.
1492         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1493             for local in fields {
1494                 match assignments[*local] {
1495                     Unassigned => {
1496                         assignments[*local] = Assigned(variant_index);
1497                     }
1498                     Assigned(idx) => {
1499                         // We've already seen this local at another suspension
1500                         // point, so it is no longer a candidate.
1501                         trace!(
1502                             "removing local {:?} in >1 variant ({:?}, {:?})",
1503                             local,
1504                             variant_index,
1505                             idx
1506                         );
1507                         ineligible_locals.insert(*local);
1508                         assignments[*local] = Ineligible(None);
1509                     }
1510                     Ineligible(_) => {}
1511                 }
1512             }
1513         }
1514
1515         // Next, check every pair of eligible locals to see if they
1516         // conflict.
1517         for local_a in info.storage_conflicts.rows() {
1518             let conflicts_a = info.storage_conflicts.count(local_a);
1519             if ineligible_locals.contains(local_a) {
1520                 continue;
1521             }
1522
1523             for local_b in info.storage_conflicts.iter(local_a) {
1524                 // local_a and local_b are storage live at the same time, therefore they
1525                 // cannot overlap in the generator layout. The only way to guarantee
1526                 // this is if they are in the same variant, or one is ineligible
1527                 // (which means it is stored in every variant).
1528                 if ineligible_locals.contains(local_b)
1529                     || assignments[local_a] == assignments[local_b]
1530                 {
1531                     continue;
1532                 }
1533
1534                 // If they conflict, we will choose one to make ineligible.
1535                 // This is not always optimal; it's just a greedy heuristic that
1536                 // seems to produce good results most of the time.
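                     // (illustrative: if `a` conflicts with both `b` and `c`, while
                     // `b` and `c` conflict only with `a`, evicting `a` alone resolves
                     // every conflict, so the higher-conflict local is the one removed)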
1537                 let conflicts_b = info.storage_conflicts.count(local_b);
1538                 let (remove, other) =
1539                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1540                 ineligible_locals.insert(remove);
1541                 assignments[remove] = Ineligible(None);
1542                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1543             }
1544         }
1545
1546         // Count the number of variants in use. If only one is used, then it is
1547         // impossible to overlap any locals in our layout. In this case it's
1548         // always better to make the remaining locals ineligible, so we can
1549         // lay them out with the other locals in the prefix and eliminate
1550         // unnecessary padding bytes.
1551         {
1552             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1553             for assignment in &assignments {
1554                 if let Assigned(idx) = assignment {
1555                     used_variants.insert(*idx);
1556                 }
1557             }
1558             if used_variants.count() < 2 {
1559                 for assignment in assignments.iter_mut() {
1560                     *assignment = Ineligible(None);
1561                 }
1562                 ineligible_locals.insert_all();
1563             }
1564         }
1565
1566         // Write down the order of our locals that will be promoted to the prefix.
1567         {
1568             for (idx, local) in ineligible_locals.iter().enumerate() {
1569                 assignments[local] = Ineligible(Some(idx as u32));
1570             }
1571         }
1572         debug!("generator saved local assignments: {:?}", assignments);
1573
1574         (ineligible_locals, assignments)
1575     }
1576
1577     /// Compute the full generator layout.
1578     fn generator_layout(
1579         &self,
1580         ty: Ty<'tcx>,
1581         def_id: hir::def_id::DefId,
1582         substs: SubstsRef<'tcx>,
1583     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1584         use SavedLocalEligibility::*;
1585         let tcx = self.tcx;
1586         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1587
1588         let Some(info) = tcx.generator_layout(def_id) else {
1589             return Err(LayoutError::Unknown(ty));
1590         };
1591         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1592
1593         // Build a prefix layout, including "promoting" all ineligible
1594         // locals as part of the prefix. We compute the layout of all of
1595         // these fields at once to get optimal packing.
1596         let tag_index = substs.as_generator().prefix_tys().count();
1597
1598         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1599         let max_discr = (info.variant_fields.len() - 1) as u128;
1600         let discr_int = Integer::fit_unsigned(max_discr);
1601         let discr_int_ty = discr_int.to_ty(tcx, false);
1602         let tag = Scalar {
1603             value: Primitive::Int(discr_int, false),
1604             valid_range: WrappingRange { start: 0, end: max_discr },
1605         };
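             // (illustrative: a generator with two yield points has the three
             // reserved variants plus two suspend variants, so `max_discr` is 4
             // and the tag fits in a `u8`)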
1606         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1607         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1608
1609         let promoted_layouts = ineligible_locals
1610             .iter()
1611             .map(|local| subst_field(info.field_tys[local]))
1612             .map(|ty| tcx.mk_maybe_uninit(ty))
1613             .map(|ty| self.layout_of(ty));
1614         let prefix_layouts = substs
1615             .as_generator()
1616             .prefix_tys()
1617             .map(|ty| self.layout_of(ty))
1618             .chain(iter::once(Ok(tag_layout)))
1619             .chain(promoted_layouts)
1620             .collect::<Result<Vec<_>, _>>()?;
1621         let prefix = self.univariant_uninterned(
1622             ty,
1623             &prefix_layouts,
1624             &ReprOptions::default(),
1625             StructKind::AlwaysSized,
1626         )?;
1627
1628         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1629
1630         // Split the prefix layout into the "outer" fields (upvars and
1631         // discriminant) and the "promoted" fields. Promoted fields will
1632         // get included in each variant that requested them in
1633         // GeneratorLayout.
1634         debug!("prefix = {:#?}", prefix);
1635         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1636             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1637                 let mut inverse_memory_index = invert_mapping(&memory_index);
1638
1639                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1640                 // "outer" and "promoted" fields respectively.
1641                 let b_start = (tag_index + 1) as u32;
1642                 let offsets_b = offsets.split_off(b_start as usize);
1643                 let offsets_a = offsets;
1644
1645                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1646                 // by preserving the order but keeping only one disjoint "half" each.
1647                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1648                 let inverse_memory_index_b: Vec<_> =
1649                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1650                 inverse_memory_index.retain(|&i| i < b_start);
1651                 let inverse_memory_index_a = inverse_memory_index;
1652
1653                 // Since `inverse_memory_index_{a,b}` each only refer to their
1654                 // respective fields, they can be safely inverted.
1655                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1656                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1657
1658                 let outer_fields =
1659                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1660                 (outer_fields, offsets_b, memory_index_b)
1661             }
1662             _ => bug!(),
1663         };
1664
1665         let mut size = prefix.size;
1666         let mut align = prefix.align;
1667         let variants = info
1668             .variant_fields
1669             .iter_enumerated()
1670             .map(|(index, variant_fields)| {
1671                 // Only include overlap-eligible fields when we compute our variant layout.
1672                 let variant_only_tys = variant_fields
1673                     .iter()
1674                     .filter(|local| match assignments[**local] {
1675                         Unassigned => bug!(),
1676                         Assigned(v) if v == index => true,
1677                         Assigned(_) => bug!("assignment does not match variant"),
1678                         Ineligible(_) => false,
1679                     })
1680                     .map(|local| subst_field(info.field_tys[*local]));
1681
1682                 let mut variant = self.univariant_uninterned(
1683                     ty,
1684                     &variant_only_tys
1685                         .map(|ty| self.layout_of(ty))
1686                         .collect::<Result<Vec<_>, _>>()?,
1687                     &ReprOptions::default(),
1688                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1689                 )?;
1690                 variant.variants = Variants::Single { index };
1691
1692                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1693                     bug!();
1694                 };
1695
1696                 // Now, stitch the promoted and variant-only fields back together in
1697                 // the order they are mentioned by our GeneratorLayout.
1698                 // Because we only use some subset (that can differ between variants)
1699                 // of the promoted fields, we can't just pick those elements of the
1700                 // `promoted_memory_index` (as we'd end up with gaps).
1701                 // So instead, we build an "inverse memory_index", as if all of the
1702                 // promoted fields were being used, but leave the elements not in the
1703                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1704                 // obtain a valid (bijective) mapping.
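                     // (illustrative: if this variant uses only the second of two
                     // promoted fields, the slot for the first stays `INVALID_FIELD_IDX`
                     // in the combined inverse index and is filtered out below)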
1705                 const INVALID_FIELD_IDX: u32 = !0;
1706                 let mut combined_inverse_memory_index =
1707                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1708                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1709                 let combined_offsets = variant_fields
1710                     .iter()
1711                     .enumerate()
1712                     .map(|(i, local)| {
1713                         let (offset, memory_index) = match assignments[*local] {
1714                             Unassigned => bug!(),
1715                             Assigned(_) => {
1716                                 let (offset, memory_index) =
1717                                     offsets_and_memory_index.next().unwrap();
1718                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1719                             }
1720                             Ineligible(field_idx) => {
1721                                 let field_idx = field_idx.unwrap() as usize;
1722                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1723                             }
1724                         };
1725                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1726                         offset
1727                     })
1728                     .collect();
1729
1730                 // Remove the unused slots and invert the mapping to obtain the
1731                 // combined `memory_index` (also see previous comment).
1732                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1733                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1734
1735                 variant.fields = FieldsShape::Arbitrary {
1736                     offsets: combined_offsets,
1737                     memory_index: combined_memory_index,
1738                 };
1739
1740                 size = size.max(variant.size);
1741                 align = align.max(variant.align);
1742                 Ok(tcx.intern_layout(variant))
1743             })
1744             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1745
1746         size = size.align_to(align.abi);
1747
1748         let abi =
1749             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1750                 Abi::Uninhabited
1751             } else {
1752                 Abi::Aggregate { sized: true }
1753             };
1754
1755         let layout = tcx.intern_layout(LayoutS {
1756             variants: Variants::Multiple {
1757                 tag,
1758                 tag_encoding: TagEncoding::Direct,
1759                 tag_field: tag_index,
1760                 variants,
1761             },
1762             fields: outer_fields,
1763             abi,
1764             largest_niche: prefix.largest_niche,
1765             size,
1766             align,
1767         });
1768         debug!("generator layout ({:?}): {:#?}", ty, layout);
1769         Ok(layout)
1770     }
1771
1772     /// This is invoked by the `layout_of` query to record the final
1773     /// layout of each type.
1774     #[inline(always)]
1775     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1776         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1777         // for dumping later.
1778         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1779             self.record_layout_for_printing_outlined(layout)
1780         }
1781     }
1782
1783     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1784         // Ignore layouts computed with non-empty param environments or for
1785         // non-monomorphic types, as the user only wants to see the layouts
1786         // resulting from the final codegen session.
1787         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1788             return;
1789         }
1790
1791         // (delay format until we actually need it)
1792         let record = |kind, packed, opt_discr_size, variants| {
1793             let type_desc = format!("{:?}", layout.ty);
1794             self.tcx.sess.code_stats.record_type_size(
1795                 kind,
1796                 type_desc,
1797                 layout.align.abi,
1798                 layout.size,
1799                 packed,
1800                 opt_discr_size,
1801                 variants,
1802             );
1803         };
1804
1805         let adt_def = match *layout.ty.kind() {
1806             ty::Adt(ref adt_def, _) => {
1807                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1808                 adt_def
1809             }
1810
1811             ty::Closure(..) => {
1812                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1813                 record(DataTypeKind::Closure, false, None, vec![]);
1814                 return;
1815             }
1816
1817             _ => {
1818                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1819                 return;
1820             }
1821         };
1822
1823         let adt_kind = adt_def.adt_kind();
1824         let adt_packed = adt_def.repr().pack.is_some();
1825
1826         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1827             let mut min_size = Size::ZERO;
1828             let field_info: Vec<_> = flds
1829                 .iter()
1830                 .enumerate()
1831                 .map(|(i, &name)| {
1832                     let field_layout = layout.field(self, i);
1833                     let offset = layout.fields.offset(i);
1834                     let field_end = offset + field_layout.size;
1835                     if min_size < field_end {
1836                         min_size = field_end;
1837                     }
1838                     FieldInfo {
1839                         name: name.to_string(),
1840                         offset: offset.bytes(),
1841                         size: field_layout.size.bytes(),
1842                         align: field_layout.align.abi.bytes(),
1843                     }
1844                 })
1845                 .collect();
1846
1847             VariantInfo {
1848                 name: n.map(|n| n.to_string()),
1849                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1850                 align: layout.align.abi.bytes(),
1851                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1852                 fields: field_info,
1853             }
1854         };
1855
1856         match layout.variants {
1857             Variants::Single { index } => {
1858                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1859                     debug!(
1860                         "print-type-size `{:#?}` variant {}",
1861                         layout,
1862                         adt_def.variant(index).name
1863                     );
1864                     let variant_def = &adt_def.variant(index);
1865                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1866                     record(
1867                         adt_kind.into(),
1868                         adt_packed,
1869                         None,
1870                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1871                     );
1872                 } else {
1873                     // (This case arises for *empty* enums; so give it
1874                     // zero variants.)
1875                     record(adt_kind.into(), adt_packed, None, vec![]);
1876                 }
1877             }
1878
1879             Variants::Multiple { tag, ref tag_encoding, .. } => {
1880                 debug!(
1881                     "print-type-size `{:#?}` adt general variants def {}",
1882                     layout.ty,
1883                     adt_def.variants().len()
1884                 );
1885                 let variant_infos: Vec<_> = adt_def
1886                     .variants()
1887                     .iter_enumerated()
1888                     .map(|(i, variant_def)| {
1889                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1890                         build_variant_info(
1891                             Some(variant_def.name),
1892                             &fields,
1893                             layout.for_variant(self, i),
1894                         )
1895                     })
1896                     .collect();
1897                 record(
1898                     adt_kind.into(),
1899                     adt_packed,
1900                     match tag_encoding {
1901                         TagEncoding::Direct => Some(tag.value.size(self)),
1902                         _ => None,
1903                     },
1904                     variant_infos,
1905                 );
1906             }
1907         }
1908     }
1909 }
1910
1911 /// Type size "skeleton", i.e., the only information determining a type's size.
1912 /// While this is conservative (aside from constant sizes, only pointers,
1913 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1914 /// enough to statically check common use cases of transmute.
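     ///
     /// For example (illustrative): while `T: Sized` is still a type parameter,
     /// both `&T` and `Option<&T>` reduce to `SizeSkeleton::Pointer` with tail
     /// `T`, so a `transmute` between them can be accepted even though neither
     /// type has a statically known layout.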
1915 #[derive(Copy, Clone, Debug)]
1916 pub enum SizeSkeleton<'tcx> {
1917     /// Any statically computable Layout.
1918     Known(Size),
1919
1920     /// A potentially-fat pointer.
1921     Pointer {
1922         /// If true, this pointer is never null.
1923         non_zero: bool,
1924         /// The type which determines the unsized metadata, if any,
1925         /// of this pointer. Either a type parameter or a projection
1926         /// depending on one, with regions erased.
1927         tail: Ty<'tcx>,
1928     },
1929 }
1930
1931 impl<'tcx> SizeSkeleton<'tcx> {
1932     pub fn compute(
1933         ty: Ty<'tcx>,
1934         tcx: TyCtxt<'tcx>,
1935         param_env: ty::ParamEnv<'tcx>,
1936     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1937         debug_assert!(!ty.has_infer_types_or_consts());
1938
1939         // First try computing a static layout.
1940         let err = match tcx.layout_of(param_env.and(ty)) {
1941             Ok(layout) => {
1942                 return Ok(SizeSkeleton::Known(layout.size));
1943             }
1944             Err(err) => err,
1945         };
1946
1947         match *ty.kind() {
1948             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1949                 let non_zero = !ty.is_unsafe_ptr();
1950                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1951                 match tail.kind() {
1952                     ty::Param(_) | ty::Projection(_) => {
1953                         debug_assert!(tail.has_param_types_or_consts());
1954                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1955                     }
1956                     _ => bug!(
1957                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1958                               tail `{}` is not a type parameter or a projection",
1959                         ty,
1960                         err,
1961                         tail
1962                     ),
1963                 }
1964             }
1965
1966             ty::Adt(def, substs) => {
1967                 // Only newtypes and enums w/ nullable pointer optimization.
1968                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1969                     return Err(err);
1970                 }
1971
1972                 // Get a zero-sized variant or a pointer newtype.
1973                 let zero_or_ptr_variant = |i| {
1974                     let i = VariantIdx::new(i);
1975                     let fields =
1976                         def.variant(i).fields.iter().map(|field| {
1977                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1978                         });
1979                     let mut ptr = None;
1980                     for field in fields {
1981                         let field = field?;
1982                         match field {
1983                             SizeSkeleton::Known(size) => {
1984                                 if size.bytes() > 0 {
1985                                     return Err(err);
1986                                 }
1987                             }
1988                             SizeSkeleton::Pointer { .. } => {
1989                                 if ptr.is_some() {
1990                                     return Err(err);
1991                                 }
1992                                 ptr = Some(field);
1993                             }
1994                         }
1995                     }
1996                     Ok(ptr)
1997                 };
1998
1999                 let v0 = zero_or_ptr_variant(0)?;
2000                 // Newtype.
2001                 if def.variants().len() == 1 {
2002                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2003                         return Ok(SizeSkeleton::Pointer {
2004                             non_zero: non_zero
2005                                 || match tcx.layout_scalar_valid_range(def.did()) {
2006                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2007                                     (Bound::Included(start), Bound::Included(end)) => {
2008                                         0 < start && start < end
2009                                     }
2010                                     _ => false,
2011                                 },
2012                             tail,
2013                         });
2014                     } else {
2015                         return Err(err);
2016                     }
2017                 }
2018
2019                 let v1 = zero_or_ptr_variant(1)?;
2020                 // Nullable pointer enum optimization.
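                     // (illustrative: in `Option<&T>`, `None` has no fields and `Some`
                     // is a single never-null pointer, so the whole enum collapses to
                     // one nullable pointer)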
2021                 match (v0, v1) {
2022                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2023                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2024                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2025                     }
2026                     _ => Err(err),
2027                 }
2028             }
2029
2030             ty::Projection(_) | ty::Opaque(..) => {
2031                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2032                 if ty == normalized {
2033                     Err(err)
2034                 } else {
2035                     SizeSkeleton::compute(normalized, tcx, param_env)
2036                 }
2037             }
2038
2039             _ => Err(err),
2040         }
2041     }
2042
2043     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2044         match (self, other) {
2045             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2046             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2047                 a == b
2048             }
2049             _ => false,
2050         }
2051     }
2052 }
2053
2054 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2055     fn tcx(&self) -> TyCtxt<'tcx>;
2056 }
2057
2058 pub trait HasParamEnv<'tcx> {
2059     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2060 }
2061
2062 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2063     #[inline]
2064     fn data_layout(&self) -> &TargetDataLayout {
2065         &self.data_layout
2066     }
2067 }
2068
2069 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2070     fn target_spec(&self) -> &Target {
2071         &self.sess.target
2072     }
2073 }
2074
2075 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2076     #[inline]
2077     fn tcx(&self) -> TyCtxt<'tcx> {
2078         *self
2079     }
2080 }
2081
2082 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2083     #[inline]
2084     fn data_layout(&self) -> &TargetDataLayout {
2085         &self.data_layout
2086     }
2087 }
2088
2089 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2090     fn target_spec(&self) -> &Target {
2091         &self.sess.target
2092     }
2093 }
2094
2095 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2096     #[inline]
2097     fn tcx(&self) -> TyCtxt<'tcx> {
2098         **self
2099     }
2100 }
2101
2102 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2103     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2104         self.param_env
2105     }
2106 }
2107
2108 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2109     fn data_layout(&self) -> &TargetDataLayout {
2110         self.tcx.data_layout()
2111     }
2112 }
2113
2114 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2115     fn target_spec(&self) -> &Target {
2116         self.tcx.target_spec()
2117     }
2118 }
2119
2120 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2121     fn tcx(&self) -> TyCtxt<'tcx> {
2122         self.tcx.tcx()
2123     }
2124 }
2125
2126 pub trait MaybeResult<T> {
2127     type Error;
2128
2129     fn from(x: Result<T, Self::Error>) -> Self;
2130     fn to_result(self) -> Result<T, Self::Error>;
2131 }
2132
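     // Plain `T` acts as an infallible `MaybeResult<T>` by using the never type
     // `!` as its error, while `Result<T, E>` passes errors through unchanged;
     // callers can thus choose whether `layout_of` hands them a `Result` at all
     // (see the two impls below).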
2133 impl<T> MaybeResult<T> for T {
2134     type Error = !;
2135
2136     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2137         x
2138     }
2139     fn to_result(self) -> Result<T, Self::Error> {
2140         Ok(self)
2141     }
2142 }
2143
2144 impl<T, E> MaybeResult<T> for Result<T, E> {
2145     type Error = E;
2146
2147     fn from(x: Result<T, Self::Error>) -> Self {
2148         x
2149     }
2150     fn to_result(self) -> Result<T, Self::Error> {
2151         self
2152     }
2153 }
2154
2155 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2156
2157 /// Trait for contexts that want to be able to compute layouts of types.
2158 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2159 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2160     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2161     /// returned from `layout_of` (see also `handle_layout_err`).
2162     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2163
2164     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2165     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2166     #[inline]
2167     fn layout_tcx_at_span(&self) -> Span {
2168         DUMMY_SP
2169     }
2170
2171     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2172     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2173     ///
2174     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2175     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2176     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2177     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2178     fn handle_layout_err(
2179         &self,
2180         err: LayoutError<'tcx>,
2181         span: Span,
2182         ty: Ty<'tcx>,
2183     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2184 }
2185
2186 /// Blanket extension trait for contexts that can compute layouts of types.
2187 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2188     /// Computes the layout of a type. Note that this implicitly
2189     /// executes in "reveal all" mode, and will normalize the input type.
2190     #[inline]
2191     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2192         self.spanned_layout_of(ty, DUMMY_SP)
2193     }
2194
2195     /// Computes the layout of a type, at `span`. Note that this implicitly
2196     /// executes in "reveal all" mode, and will normalize the input type.
2197     // FIXME(eddyb) avoid passing information like this, and instead add more
2198     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2199     #[inline]
2200     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2201         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2202         let tcx = self.tcx().at(span);
2203
2204         MaybeResult::from(
2205             tcx.layout_of(self.param_env().and(ty))
2206                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2207         )
2208     }
2209 }
2210
2211 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2212
2213 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2214     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2215
2216     #[inline]
2217     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2218         err
2219     }
2220 }
2221
2222 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2223     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2224
2225     #[inline]
2226     fn layout_tcx_at_span(&self) -> Span {
2227         self.tcx.span
2228     }
2229
2230     #[inline]
2231     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2232         err
2233     }
2234 }
2235
2236 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2237 where
2238     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2239 {
2240     fn ty_and_layout_for_variant(
2241         this: TyAndLayout<'tcx>,
2242         cx: &C,
2243         variant_index: VariantIdx,
2244     ) -> TyAndLayout<'tcx> {
2245         let layout = match this.variants {
2246             Variants::Single { index }
2247                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2248                 if index == variant_index &&
2249                 // Don't confuse variants of uninhabited enums with the enum itself.
2250                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2251                 this.fields != FieldsShape::Primitive =>
2252             {
2253                 this.layout
2254             }
2255
2256             Variants::Single { index } => {
2257                 let tcx = cx.tcx();
2258                 let param_env = cx.param_env();
2259
2260                 // Deny calling for_variant more than once for non-Single enums.
2261                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2262                     assert_eq!(original_layout.variants, Variants::Single { index });
2263                 }
2264
2265                 let fields = match this.ty.kind() {
2266                     ty::Adt(def, _) if def.variants().is_empty() =>
2267                         bug!("for_variant called on zero-variant enum"),
2268                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2269                     _ => bug!(),
2270                 };
2271                 tcx.intern_layout(LayoutS {
2272                     variants: Variants::Single { index: variant_index },
2273                     fields: match NonZeroUsize::new(fields) {
2274                         Some(fields) => FieldsShape::Union(fields),
2275                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2276                     },
2277                     abi: Abi::Uninhabited,
2278                     largest_niche: None,
2279                     align: tcx.data_layout.i8_align,
2280                     size: Size::ZERO,
2281                 })
2282             }
2283
2284             Variants::Multiple { ref variants, .. } => variants[variant_index],
2285         };
2286
2287         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2288
2289         TyAndLayout { ty: this.ty, layout }
2290     }
2291
2292     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2293         enum TyMaybeWithLayout<'tcx> {
2294             Ty(Ty<'tcx>),
2295             TyAndLayout(TyAndLayout<'tcx>),
2296         }
2297
2298         fn field_ty_or_layout<'tcx>(
2299             this: TyAndLayout<'tcx>,
2300             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2301             i: usize,
2302         ) -> TyMaybeWithLayout<'tcx> {
2303             let tcx = cx.tcx();
2304             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2305                 TyAndLayout {
2306                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2307                     ty: tag.value.to_ty(tcx),
2308                 }
2309             };
2310
2311             match *this.ty.kind() {
2312                 ty::Bool
2313                 | ty::Char
2314                 | ty::Int(_)
2315                 | ty::Uint(_)
2316                 | ty::Float(_)
2317                 | ty::FnPtr(_)
2318                 | ty::Never
2319                 | ty::FnDef(..)
2320                 | ty::GeneratorWitness(..)
2321                 | ty::Foreign(..)
2322                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2323
2324                 // Potentially-fat pointers.
2325                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2326                     assert!(i < this.fields.count());
2327
2328                     // Reuse the fat `*T` type as its own thin pointer data field.
2329                     // This provides information about, e.g., DST struct pointees
2330                     // (which may have no non-DST form), and will work as long
2331                     // as the `Abi` or `FieldsShape` is checked by users.
2332                     if i == 0 {
2333                         let nil = tcx.mk_unit();
2334                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2335                             tcx.mk_mut_ptr(nil)
2336                         } else {
2337                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2338                         };
2339
2340                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2341                         // the `Result` should always work because the type is
2342                         // always either `*mut ()` or `&'static mut ()`.
2343                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2344                             ty: this.ty,
2345                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2346                         });
2347                     }
2348
2349                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2350                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2351                         ty::Dynamic(_, _) => {
2352                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2353                                 tcx.lifetimes.re_static,
2354                                 tcx.mk_array(tcx.types.usize, 3),
2355                             ))
2356                             /* FIXME: use actual fn pointers
2357                             Warning: naively computing the number of entries in the
2358                             vtable by counting the methods on the trait + methods on
2359                             all parent traits does not work, because some methods can
2360                             be not object safe and thus excluded from the vtable.
2361                             Increase this counter if you tried to implement this but
2362                             failed to do it without duplicating a lot of code from
2363                             other places in the compiler: 2
2364                             tcx.mk_tup(&[
2365                                 tcx.mk_array(tcx.types.usize, 3),
2366                                 tcx.mk_array(Option<fn()>),
2367                             ])
2368                             */
2369                         }
2370                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2371                     }
2372                 }
2373
2374                 // Arrays and slices.
2375                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2376                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2377
2378                 // Tuples, generators and closures.
2379                 ty::Closure(_, ref substs) => field_ty_or_layout(
2380                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2381                     cx,
2382                     i,
2383                 ),
2384
2385                 ty::Generator(def_id, ref substs, _) => match this.variants {
2386                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2387                         substs
2388                             .as_generator()
2389                             .state_tys(def_id, tcx)
2390                             .nth(index.as_usize())
2391                             .unwrap()
2392                             .nth(i)
2393                             .unwrap(),
2394                     ),
2395                     Variants::Multiple { tag, tag_field, .. } => {
2396                         if i == tag_field {
2397                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2398                         }
2399                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2400                     }
2401                 },
2402
2403                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2404
2405                 // ADTs.
2406                 ty::Adt(def, substs) => {
2407                     match this.variants {
2408                         Variants::Single { index } => {
2409                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2410                         }
2411
2412                         // Discriminant field for enums (where applicable).
2413                         Variants::Multiple { tag, .. } => {
2414                             assert_eq!(i, 0);
2415                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2416                         }
2417                     }
2418                 }
2419
2420                 ty::Projection(_)
2421                 | ty::Bound(..)
2422                 | ty::Placeholder(..)
2423                 | ty::Opaque(..)
2424                 | ty::Param(_)
2425                 | ty::Infer(_)
2426                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2427             }
2428         }
2429
2430         match field_ty_or_layout(this, cx, i) {
2431             TyMaybeWithLayout::Ty(field_ty) => {
2432                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2433                     bug!(
2434                         "failed to get layout for `{}`: {},\n\
2435                          despite it being a field (#{}) of an existing layout: {:#?}",
2436                         field_ty,
2437                         e,
2438                         i,
2439                         this
2440                     )
2441                 })
2442             }
2443             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2444         }
2445     }
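    // A hedged illustration of the mapping above (not compiler code): for a
    // tagged enum such as `Result<u32, u64>`, field 0 of the enum-level layout
    // (`Variants::Multiple`) is the tag, while variant payloads are reached by
    // selecting the variant first:
    //
    //     let tag = layout.field(cx, 0); // layout of the discriminant
    //     let ok = layout.for_variant(cx, VariantIdx::from_u32(0));
    //     let payload = ok.field(cx, 0); // layout of the `u32`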
2446
2447     fn ty_and_layout_pointee_info_at(
2448         this: TyAndLayout<'tcx>,
2449         cx: &C,
2450         offset: Size,
2451     ) -> Option<PointeeInfo> {
2452         let tcx = cx.tcx();
2453         let param_env = cx.param_env();
2454
2455         let addr_space_of_ty = |ty: Ty<'tcx>| {
2456             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2457         };
2458
2459         let pointee_info = match *this.ty.kind() {
2460             ty::RawPtr(mt) if offset.bytes() == 0 => {
2461                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2462                     size: layout.size,
2463                     align: layout.align.abi,
2464                     safe: None,
2465                     address_space: addr_space_of_ty(mt.ty),
2466                 })
2467             }
2468             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2469                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2470                     size: layout.size,
2471                     align: layout.align.abi,
2472                     safe: None,
2473                     address_space: cx.data_layout().instruction_address_space,
2474                 })
2475             }
2476             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2477                 let address_space = addr_space_of_ty(ty);
2478                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2479                     // Use conservative pointer kind if not optimizing. This saves us the
2480                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2481                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2482                     PointerKind::Shared
2483                 } else {
2484                     match mt {
2485                         hir::Mutability::Not => {
2486                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2487                                 PointerKind::Frozen
2488                             } else {
2489                                 PointerKind::Shared
2490                             }
2491                         }
2492                         hir::Mutability::Mut => {
2493                             // References to self-referential structures should not be considered
2494                             // noalias, as another pointer to the structure can be obtained
2495                             // that is not based on the original reference. We consider all !Unpin
2496                             // types to be potentially self-referential here.
2497                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2498                                 PointerKind::UniqueBorrowed
2499                             } else {
2500                                 PointerKind::Shared
2501                             }
2502                         }
2503                     }
2504                 };
2505
2506                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2507                     size: layout.size,
2508                     align: layout.align.abi,
2509                     safe: Some(kind),
2510                     address_space,
2511                 })
2512             }
2513
2514             _ => {
2515                 let mut data_variant = match this.variants {
2516                     // Within the discriminant field, only the niche itself is
2517                     // always initialized, so we only check for a pointer at its
2518                     // offset.
2519                     //
2520                     // If the niche is a pointer, it's either valid (according
2521                     // to its type), or null (which the niche field's scalar
2522                     // validity range encodes).  This allows using
2523                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2524                     // this will continue to work as long as we don't start
2525                     // using more niches than just null (e.g., the first page of
2526                     // the address space, or unaligned pointers).
2527                     Variants::Multiple {
2528                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2529                         tag_field,
2530                         ..
2531                     } if this.fields.offset(tag_field) == offset => {
2532                         Some(this.for_variant(cx, dataful_variant))
2533                     }
2534                     _ => Some(this),
2535                 };
2536
2537                 if let Some(variant) = data_variant {
2538                     // We're not interested in any unions.
2539                     if let FieldsShape::Union(_) = variant.fields {
2540                         data_variant = None;
2541                     }
2542                 }
2543
2544                 let mut result = None;
2545
2546                 if let Some(variant) = data_variant {
2547                     let ptr_end = offset + Pointer.size(cx);
2548                     for i in 0..variant.fields.count() {
2549                         let field_start = variant.fields.offset(i);
2550                         if field_start <= offset {
2551                             let field = variant.field(cx, i);
2552                             result = field.to_result().ok().and_then(|field| {
2553                                 if ptr_end <= field_start + field.size {
2554                                     // We found the right field; look inside it.
2555                                     field.pointee_info_at(cx, offset - field_start)
2558                                 } else {
2559                                     None
2560                                 }
2561                             });
2562                             if result.is_some() {
2563                                 break;
2564                             }
2565                         }
2566                     }
2567                 }
2568
2569                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2570                 if let Some(ref mut pointee) = result {
2571                     if let ty::Adt(def, _) = this.ty.kind() {
2572                         if def.is_box() && offset.bytes() == 0 {
2573                             pointee.safe = Some(PointerKind::UniqueOwned);
2574                         }
2575                     }
2576                 }
2577
2578                 result
2579             }
2580         };
2581
2582         debug!(
2583             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2584             offset,
2585             this.ty.kind(),
2586             pointee_info
2587         );
2588
2589         pointee_info
2590     }
2591 }
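// A hedged illustration of `ty_and_layout_pointee_info_at` (not compiler
// code): for an argument of type `&'a u32` at offset 0 in an optimized build,
// the result is roughly
//
//     Some(PointeeInfo {
//         size: Size::from_bytes(4),
//         align: Align::from_bytes(4).unwrap(),
//         safe: Some(PointerKind::Frozen), // `u32` is `Freeze`
//         address_space: AddressSpace::DATA,
//     })
//
// whereas a raw pointer at the same offset yields `safe: None`.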
2592
2593 impl<'tcx> ty::Instance<'tcx> {
2594     // NOTE(eddyb) this is private to avoid using it from outside of
2595     // `fn_abi_of_instance` - any other uses are either too high-level
2596     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2597     // or should go through `FnAbi` instead, to avoid losing any
2598     // adjustments `fn_abi_of_instance` might be performing.
2599     fn fn_sig_for_fn_abi(
2600         &self,
2601         tcx: TyCtxt<'tcx>,
2602         param_env: ty::ParamEnv<'tcx>,
2603     ) -> ty::PolyFnSig<'tcx> {
2604         let ty = self.ty(tcx, param_env);
2605         match *ty.kind() {
2606             ty::FnDef(..) => {
2607                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2608                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2609                 // (i.e. due to being inside a projection that got normalized, see
2610                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2611                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2612                 let mut sig = match *ty.kind() {
2613                     ty::FnDef(def_id, substs) => tcx
2614                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2615                         .subst(tcx, substs),
2616                     _ => unreachable!(),
2617                 };
2618
2619                 if let ty::InstanceDef::VtableShim(..) = self.def {
2620                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2621                     sig = sig.map_bound(|mut sig| {
2622                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2623                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2624                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2625                         sig
2626                     });
2627                 }
2628                 sig
2629             }
2630             ty::Closure(def_id, substs) => {
2631                 let sig = substs.as_closure().sig();
2632
2633                 let bound_vars = tcx.mk_bound_variable_kinds(
2634                     sig.bound_vars()
2635                         .iter()
2636                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2637                 );
2638                 let br = ty::BoundRegion {
2639                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2640                     kind: ty::BoundRegionKind::BrEnv,
2641                 };
2642                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2643                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2644
2645                 let sig = sig.skip_binder();
2646                 ty::Binder::bind_with_vars(
2647                     tcx.mk_fn_sig(
2648                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2649                         sig.output(),
2650                         sig.c_variadic,
2651                         sig.unsafety,
2652                         sig.abi,
2653                     ),
2654                     bound_vars,
2655                 )
2656             }
2657             ty::Generator(_, substs, _) => {
2658                 let sig = substs.as_generator().poly_sig();
2659
2660                 let bound_vars = tcx.mk_bound_variable_kinds(
2661                     sig.bound_vars()
2662                         .iter()
2663                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2664                 );
2665                 let br = ty::BoundRegion {
2666                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2667                     kind: ty::BoundRegionKind::BrEnv,
2668                 };
2669                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2670                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2671
2672                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2673                 let pin_adt_ref = tcx.adt_def(pin_did);
2674                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2675                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2676
2677                 let sig = sig.skip_binder();
2678                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2679                 let state_adt_ref = tcx.adt_def(state_did);
2680                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2681                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2682                 ty::Binder::bind_with_vars(
2683                     tcx.mk_fn_sig(
2684                         [env_ty, sig.resume_ty].iter(),
2685                         &ret_ty,
2686                         false,
2687                         hir::Unsafety::Normal,
2688                         rustc_target::spec::abi::Abi::Rust,
2689                     ),
2690                     bound_vars,
2691                 )
2692             }
2693             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2694         }
2695     }
2696 }
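// A hedged illustration of `fn_sig_for_fn_abi` for closures (not compiler
// code): a closure `|x: u32| -> u32` has an explicit environment parameter
// prepended, so its ABI-level signature is roughly
//
//     extern "rust-call" fn(&{closure-env}, (u32,)) -> u32
//
// and for generators the signature becomes roughly
//
//     fn(Pin<&mut {generator}>, ResumeTy) -> GeneratorState<Yield, Return>
//
// matching the `Pin` and `GeneratorState` lang items looked up above.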
2697
2698 /// Calculates whether a function's ABI can unwind or not.
2699 ///
2700 /// This takes two primary parameters:
2701 ///
2702 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2703 ///   codegen attrs for a defined function. For function pointers this set of
2704 ///   flags is the empty set. This is only applicable to Rust-defined
2705 ///   functions, and generally isn't needed except for small optimizations where
2706 ///   we mark a function which otherwise might look like it could unwind as one
2707 ///   that doesn't actually unwind (such as intrinsics).
2708 ///
2709 /// * `abi` - this is the ABI that the function is defined with. This is the
2710 ///   primary factor for determining whether a function can unwind or not.
2711 ///
2712 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2713 /// panics are implemented with unwinds on most platforms (when
2714 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2715 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2716 /// specifically in the name (e.g. `"C-unwind"`). Unwinding is defined for each
2717 /// ABI individually, but it always corresponds to some form of
2718 /// stack-based unwinding (the exact mechanism of which varies
2719 /// platform-by-platform).
2720 ///
2721 /// Rust functions are classified as able or unable to unwind based on the
2722 /// active "panic strategy". In other words, Rust functions are considered able
2723 /// to unwind in `-Cpanic=unwind` mode and unable to in `-Cpanic=abort` mode.
2724 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2725 /// only if the final panic mode is panic=abort. In this scenario any code
2726 /// previously compiled assuming that a function can unwind is still correct; it
2727 /// just never happens to actually unwind at runtime.
2728 ///
2729 /// This function's answer to whether or not a function can unwind is quite
2730 /// impactful throughout the compiler. This affects things like:
2731 ///
2732 /// * Calling a function which can't unwind means codegen simply ignores any
2733 ///   associated unwinding cleanup.
2734 /// * Calling a function which can unwind from a function which can't unwind
2735 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2736 ///   aborts the process.
2737 /// * This determines whether functions have the LLVM `nounwind` attribute, which
2738 ///   affects various optimizations and codegen.
2739 ///
2740 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2741 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2742 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2743 /// might (from a foreign exception or similar).
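///
/// A hedged sketch of the outcomes (illustrative only; assumes no special
/// codegen attrs and that `#![feature(c_unwind)]` is not enabled):
///
/// ```ignore (illustrative)
/// // Rust ABIs follow the panic strategy:
/// fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::Rust);
/// // => true under `-Cpanic=unwind`, false under `-Cpanic=abort`
///
/// // `extern "C-unwind"` is always considered able to unwind:
/// fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::C { unwind: true }); // => true
///
/// // Plain `extern "C"` currently also follows the panic strategy:
/// fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::C { unwind: false });
/// // => true under `-Cpanic=unwind`, false under `-Cpanic=abort`
/// ```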
2744 #[inline]
2745 pub fn fn_can_unwind<'tcx>(
2746     tcx: TyCtxt<'tcx>,
2747     codegen_fn_attr_flags: CodegenFnAttrFlags,
2748     abi: SpecAbi,
2749 ) -> bool {
2750     // Special attribute for functions which can't unwind.
2751     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2752         return false;
2753     }
2754
2755     // Otherwise if this isn't special then unwinding is generally determined by
2756     // the ABI of the function itself. ABIs like `C` have variants which also
2757     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2758     // ABIs have such an option. Otherwise the only other thing here is Rust
2759     // itself, and those ABIs are determined by the panic strategy configured
2760     // for this compilation.
2761     //
2762     // Unfortunately at this time there's also another caveat. Rust [RFC
2763     // 2945][rfc] has been accepted and is in the process of being implemented
2764     // and stabilized. In this interim state we need to deal with historical
2765     // rustc behavior as well as plan for future rustc behavior.
2766     //
2767     // Historically functions declared with `extern "C"` were marked at the
2768     // codegen layer as `nounwind`. This happened regardless of whether
2769     // `panic=unwind` was enabled. This is UB for functions in `panic=unwind`
2770     // mode that then actually panic and unwind. Note that this behavior holds
2771     // for both externally declared functions and Rust-defined functions.
2772     //
2773     // To fix this UB rustc would like to change in the future to catch unwinds
2774     // from function calls that may unwind within a Rust-defined `extern "C"`
2775     // function and forcibly abort the process, thereby respecting the
2776     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2777     // ready to roll out, so determining whether or not the `C` family of ABIs
2778     // unwinds is conditional not only on their definition but also on whether the
2779     // `#![feature(c_unwind)]` feature gate is active.
2780     //
2781     // Note that this means that, unlike historical compilers, rustc now, by
2782     // default, unconditionally thinks that the `C` ABI may unwind. This will
2783     // prevent some optimization opportunities, however, so we try to scope this
2784     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2785     // to `panic=abort`).
2786     //
2787     // Eventually the check against `c_unwind` here will ideally get removed and
2788     // this'll be a little cleaner as it'll be a straightforward check of the
2789     // ABI.
2790     //
2791     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2792     use SpecAbi::*;
2793     match abi {
2794         C { unwind }
2795         | System { unwind }
2796         | Cdecl { unwind }
2797         | Stdcall { unwind }
2798         | Fastcall { unwind }
2799         | Vectorcall { unwind }
2800         | Thiscall { unwind }
2801         | Aapcs { unwind }
2802         | Win64 { unwind }
2803         | SysV64 { unwind } => {
2804             unwind
2805                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2806         }
2807         PtxKernel
2808         | Msp430Interrupt
2809         | X86Interrupt
2810         | AmdGpuKernel
2811         | EfiApi
2812         | AvrInterrupt
2813         | AvrNonBlockingInterrupt
2814         | CCmseNonSecureCall
2815         | Wasm
2816         | RustIntrinsic
2817         | PlatformIntrinsic
2818         | Unadjusted => false,
2819         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2820     }
2821 }
2822
2823 #[inline]
2824 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2825     use rustc_target::spec::abi::Abi::*;
2826     match tcx.sess.target.adjust_abi(abi) {
2827         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2828
2829         // It's the ABI's job to select this, not ours.
2830         System { .. } => bug!("system abi should be selected elsewhere"),
2831         EfiApi => bug!("eficall abi should be selected elsewhere"),
2832
2833         Stdcall { .. } => Conv::X86Stdcall,
2834         Fastcall { .. } => Conv::X86Fastcall,
2835         Vectorcall { .. } => Conv::X86VectorCall,
2836         Thiscall { .. } => Conv::X86ThisCall,
2837         C { .. } => Conv::C,
2838         Unadjusted => Conv::C,
2839         Win64 { .. } => Conv::X86_64Win64,
2840         SysV64 { .. } => Conv::X86_64SysV,
2841         Aapcs { .. } => Conv::ArmAapcs,
2842         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2843         PtxKernel => Conv::PtxKernel,
2844         Msp430Interrupt => Conv::Msp430Intr,
2845         X86Interrupt => Conv::X86Intr,
2846         AmdGpuKernel => Conv::AmdGpuKernel,
2847         AvrInterrupt => Conv::AvrInterrupt,
2848         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2849         Wasm => Conv::C,
2850
2851         // These API constants ought to be more specific...
2852         Cdecl { .. } => Conv::C,
2853     }
2854 }
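// A hedged usage sketch (not compiler code): this is the mapping applied when
// building a `FnAbi`, after the target has had a chance to rewrite the ABI:
//
//     let conv = conv_from_spec_abi(tcx, SpecAbi::Stdcall { unwind: false });
//     // => Conv::X86Stdcall (on targets where `adjust_abi` keeps `stdcall`)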
2855
2856 /// Error produced by attempting to compute or adjust a `FnAbi`.
2857 #[derive(Copy, Clone, Debug, HashStable)]
2858 pub enum FnAbiError<'tcx> {
2859     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2860     Layout(LayoutError<'tcx>),
2861
2862     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2863     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2864 }
2865
2866 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2867     fn from(err: LayoutError<'tcx>) -> Self {
2868         Self::Layout(err)
2869     }
2870 }
2871
2872 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2873     fn from(err: call::AdjustForForeignAbiError) -> Self {
2874         Self::AdjustForForeignAbi(err)
2875     }
2876 }
2877
2878 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2879     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2880         match self {
2881             Self::Layout(err) => err.fmt(f),
2882             Self::AdjustForForeignAbi(err) => err.fmt(f),
2883         }
2884     }
2885 }
2886
2887 // FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
2888 // just for error handling.
2889 #[derive(Debug)]
2890 pub enum FnAbiRequest<'tcx> {
2891     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2892     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2893 }
2894
2895 /// Trait for contexts that want to be able to compute `FnAbi`s.
2896 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2897 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2898     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2899     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2900     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2901
2902     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2903     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2904     ///
2905     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2906     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2907     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2908     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2909     fn handle_fn_abi_err(
2910         &self,
2911         err: FnAbiError<'tcx>,
2912         span: Span,
2913         fn_abi_request: FnAbiRequest<'tcx>,
2914     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2915 }
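// A hedged sketch of an infallible implementation (`MyCodegenCx` is a
// hypothetical context type; codegen backends follow this pattern): picking
// `&'tcx FnAbi<...>` directly as the result type makes `MaybeResult::Error`
// be `!`, so errors must be reported fatally:
//
//     impl<'tcx> FnAbiOfHelpers<'tcx> for MyCodegenCx<'tcx> {
//         type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
//
//         fn handle_fn_abi_err(
//             &self,
//             err: FnAbiError<'tcx>,
//             span: Span,
//             fn_abi_request: FnAbiRequest<'tcx>,
//         ) -> ! {
//             self.tcx().sess.span_fatal(span, &format!("{}: {:?}", err, fn_abi_request))
//         }
//     }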
2916
2917 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2918 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2919     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2920     ///
2921     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2922     /// instead, where the instance is an `InstanceDef::Virtual`.
2923     #[inline]
2924     fn fn_abi_of_fn_ptr(
2925         &self,
2926         sig: ty::PolyFnSig<'tcx>,
2927         extra_args: &'tcx ty::List<Ty<'tcx>>,
2928     ) -> Self::FnAbiOfResult {
2929         // FIXME(eddyb) get a better `span` here.
2930         let span = self.layout_tcx_at_span();
2931         let tcx = self.tcx().at(span);
2932
2933         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2934             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2935         ))
2936     }
2937
2938     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2939     /// direct calls to an `fn`.
2940     ///
2941     /// NB: that includes virtual calls, which are represented by "direct calls"
2942     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2943     #[inline]
2944     fn fn_abi_of_instance(
2945         &self,
2946         instance: ty::Instance<'tcx>,
2947         extra_args: &'tcx ty::List<Ty<'tcx>>,
2948     ) -> Self::FnAbiOfResult {
2949         // FIXME(eddyb) get a better `span` here.
2950         let span = self.layout_tcx_at_span();
2951         let tcx = self.tcx().at(span);
2952
2953         MaybeResult::from(
2954             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2955                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2956                 // we can get some kind of span even if one wasn't provided.
2957                 // However, we don't do this early in order to avoid calling
2958                 // `def_span` unconditionally (which may have a perf penalty).
2959                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2960                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2961             }),
2962         )
2963     }
2964 }
2965
2966 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
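// A hedged usage sketch (not compiler code): with the blanket impl above, any
// `FnAbiOfHelpers` context can write e.g.
//
//     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
//
// and get back `Self::FnAbiOfResult`: a plain `&FnAbi` for infallible
// contexts, or a `Result<&FnAbi, _>` for fallible ones.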
2967
2968 fn fn_abi_of_fn_ptr<'tcx>(
2969     tcx: TyCtxt<'tcx>,
2970     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2971 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2972     let (param_env, (sig, extra_args)) = query.into_parts();
2973
2974     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2975         sig,
2976         extra_args,
2977         None,
2978         CodegenFnAttrFlags::empty(),
2979         false,
2980     )
2981 }
2982
2983 fn fn_abi_of_instance<'tcx>(
2984     tcx: TyCtxt<'tcx>,
2985     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2986 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2987     let (param_env, (instance, extra_args)) = query.into_parts();
2988
2989     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
2990
2991     let caller_location = if instance.def.requires_caller_location(tcx) {
2992         Some(tcx.caller_location_ty())
2993     } else {
2994         None
2995     };
2996
2997     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2998
2999     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3000         sig,
3001         extra_args,
3002         caller_location,
3003         attrs,
3004         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3005     )
3006 }
3007
3008 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3009     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3010     // arguments of this method into a separate `struct`.
3011     fn fn_abi_new_uncached(
3012         &self,
3013         sig: ty::PolyFnSig<'tcx>,
3014         extra_args: &[Ty<'tcx>],
3015         caller_location: Option<Ty<'tcx>>,
3016         codegen_fn_attr_flags: CodegenFnAttrFlags,
3017         // FIXME(eddyb) replace this with something typed, like an `enum`.
3018         force_thin_self_ptr: bool,
3019     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3020         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3021
3022         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3023
3024         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3025
3026         let mut inputs = sig.inputs();
3027         let extra_args = if sig.abi == RustCall {
3028             assert!(!sig.c_variadic && extra_args.is_empty());
3029
3030             if let Some(input) = sig.inputs().last() {
3031                 if let ty::Tuple(tupled_arguments) = input.kind() {
3032                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3033                     tupled_arguments
3034                 } else {
3035                     bug!(
3036                         "argument to function with \"rust-call\" ABI \
3037                             is not a tuple"
3038                     );
3039                 }
3040             } else {
3041                 bug!(
3042                     "function with \"rust-call\" ABI \
3043                         has no arguments (expected at least the argument tuple)"
3044                 );
3045             }
3046         } else {
3047             assert!(sig.c_variadic || extra_args.is_empty());
3048             extra_args
3049         };
3050
3051         let target = &self.tcx.sess.target;
3052         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3053         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3054         let linux_s390x_gnu_like =
3055             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3056         let linux_sparc64_gnu_like =
3057             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3058         let linux_powerpc_gnu_like =
3059             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3060         use SpecAbi::*;
3061         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3062
3063         // Handle safe Rust thin and fat pointers.
3064         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3065                                       scalar: Scalar,
3066                                       layout: TyAndLayout<'tcx>,
3067                                       offset: Size,
3068                                       is_return: bool| {
3069             // Booleans are always a noundef i1 that needs to be zero-extended.
3070             if scalar.is_bool() {
3071                 attrs.ext(ArgExtension::Zext);
3072                 attrs.set(ArgAttribute::NoUndef);
3073                 return;
3074             }
3075
3076             // Scalars which have invalid values cannot be undef.
3077             if !scalar.is_always_valid(self) {
3078                 attrs.set(ArgAttribute::NoUndef);
3079             }
3080
3081             // Only pointer types are handled below.
3082             if scalar.value != Pointer {
3083                 return;
3084             }
3085
3086             if !scalar.valid_range.contains(0) {
3087                 attrs.set(ArgAttribute::NonNull);
3088             }
3089
3090             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3091                 if let Some(kind) = pointee.safe {
3092                     attrs.pointee_align = Some(pointee.align);
3093
3094                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
3095                     // for the entire duration of the function, as the pointee can be
3096                     // deallocated at any time. Set their valid size to 0.
3097                     attrs.pointee_size = match kind {
3098                         PointerKind::UniqueOwned => Size::ZERO,
3099                         _ => pointee.size,
3100                     };
3101
3102                     // `Box`, `&T`, and `&mut T` cannot be undef.
3103                     // Note that this only applies to the value of the pointer itself;
3104                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3105                     attrs.set(ArgAttribute::NoUndef);
3106
3107                     // `Box` pointer parameters never alias because ownership is transferred.
3108                     // `&mut` pointer parameters never alias other parameters
3109                     // or mutable global data.
3110                     //
3111                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3112                     // and can be marked as both `readonly` and `noalias`, as
3113                     // LLVM's definition of `noalias` is based solely on memory
3114                     // dependencies rather than pointer equality
3115                     //
3116                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3117                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3118                     // or not to actually emit the attribute. It can also be controlled with the
3119                     // `-Zmutable-noalias` debugging option.
3120                     let no_alias = match kind {
3121                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3122                         PointerKind::UniqueOwned => true,
3123                         PointerKind::Frozen => !is_return,
3124                     };
3125                     if no_alias {
3126                         attrs.set(ArgAttribute::NoAlias);
3127                     }
3128
3129                     if kind == PointerKind::Frozen && !is_return {
3130                         attrs.set(ArgAttribute::ReadOnly);
3131                     }
3132
3133                     if kind == PointerKind::UniqueBorrowed && !is_return {
3134                         attrs.set(ArgAttribute::NoAliasMutRef);
3135                     }
3136                 }
3137             }
3138         };
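        // A hedged summary of `adjust_for_rust_scalar` (illustrative, not
        // exhaustive): for a non-return pointer argument in an optimized build,
        //
        //     &T where T: Freeze    => noundef + nonnull + readonly + noalias
        //     &mut T where T: Unpin => noundef + nonnull (+ NoAliasMutRef,
        //                              which the backend may turn into noalias)
        //     Box<T>                => noundef + nonnull + noalias,
        //                              with `pointee_size` forced to 0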
3139
3140         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3141             let is_return = arg_idx.is_none();
3142
3143             let layout = self.layout_of(ty)?;
3144             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3145                 // Don't pass the vtable; it's not an argument of the virtual fn.
3146                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3147                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3148                 make_thin_self_ptr(self, layout)
3149             } else {
3150                 layout
3151             };
3152
3153             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3154                 let mut attrs = ArgAttributes::new();
3155                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3156                 attrs
3157             });
3158
3159             if arg.layout.is_zst() {
3160                 // For some forsaken reason, x86_64-pc-windows-gnu
3161                 // doesn't ignore zero-sized struct arguments.
3162                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3163                 if is_return
3164                     || rust_abi
3165                     || (!win_x64_gnu
3166                         && !linux_s390x_gnu_like
3167                         && !linux_sparc64_gnu_like
3168                         && !linux_powerpc_gnu_like)
3169                 {
3170                     arg.mode = PassMode::Ignore;
3171                 }
3172             }
3173
3174             Ok(arg)
3175         };
3176
3177         let mut fn_abi = FnAbi {
3178             ret: arg_of(sig.output(), None)?,
3179             args: inputs
3180                 .iter()
3181                 .copied()
3182                 .chain(extra_args.iter().copied())
3183                 .chain(caller_location)
3184                 .enumerate()
3185                 .map(|(i, ty)| arg_of(ty, Some(i)))
3186                 .collect::<Result<_, _>>()?,
3187             c_variadic: sig.c_variadic,
3188             fixed_count: inputs.len(),
3189             conv,
3190             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3191         };
3192         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3193         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3194         Ok(self.tcx.arena.alloc(fn_abi))
3195     }
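    // A hedged illustration of the "rust-call" untupling above (not compiler
    // code): a closure-call shim with signature
    //
    //     extern "rust-call" fn(&F, (u32, u64)) -> bool
    //
    // yields a `FnAbi` whose `args` are `[&F, u32, u64]`, i.e. the trailing
    // tuple is flattened into individual arguments.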
3196
3197     fn fn_abi_adjust_for_abi(
3198         &self,
3199         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3200         abi: SpecAbi,
3201     ) -> Result<(), FnAbiError<'tcx>> {
3202         if abi == SpecAbi::Unadjusted {
3203             return Ok(());
3204         }
3205
3206         if abi == SpecAbi::Rust
3207             || abi == SpecAbi::RustCall
3208             || abi == SpecAbi::RustIntrinsic
3209             || abi == SpecAbi::PlatformIntrinsic
3210         {
3211             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3212                 if arg.is_ignore() {
3213                     return;
3214                 }
3215
3216                 match arg.layout.abi {
3217                     Abi::Aggregate { .. } => {}
3218
3219                     // This is a fun case! The gist of what this is doing is
3220                     // that we want callers and callees to always agree on the
3221                     // ABI of how they pass SIMD arguments. If we were to *not*
3222                     // make these arguments indirect then they'd be immediates
3223                     // in LLVM, which means that they'd use whatever the
3224                     // appropriate ABI is for the callee and the caller. That
3225                     // means, for example, if the caller doesn't have AVX
3226                     // enabled but the callee does, then passing an AVX argument
3227                     // across this boundary would cause corrupt data to show up.
3228                     //
3229                     // This problem is fixed by unconditionally passing SIMD
3230                     // arguments through memory between callers and callees
3231                     // which should get them all to agree on ABI regardless of
3232                     // target feature sets. Some more information about this
3233                     // issue can be found in #44367.
3234                     //
3235                     // Note that the platform intrinsic ABI is exempt here as
3236                     // that's how we connect up to LLVM and it's unstable
3237                     // anyway; we control all calls to it in libstd.
3238                     Abi::Vector { .. }
3239                         if abi != SpecAbi::PlatformIntrinsic
3240                             && self.tcx.sess.target.simd_types_indirect =>
3241                     {
3242                         arg.make_indirect();
3243                         return;
3244                     }
3245
3246                     _ => return,
3247                 }
3248
3249                 let size = arg.layout.size;
3250                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3251                     arg.make_indirect();
3252                 } else {
3253                     // We want to pass small aggregates as immediates, but using
3254                     // an LLVM aggregate type for this leads to bad optimizations,
3255                     // so we pick an appropriately sized integer type instead.
3256                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3257                 }
3258             };
3259             fixup(&mut fn_abi.ret);
3260             for arg in &mut fn_abi.args {
3261                 fixup(arg);
3262             }
3263         } else {
3264             fn_abi.adjust_for_foreign_abi(self, abi)?;
3265         }
3266
3267         Ok(())
3268     }
3269 }
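// A hedged illustration of `fn_abi_adjust_for_abi` for Rust ABIs (not compiler
// code): on a 64-bit target, a small aggregate such as `[u8; 4]` (size 4,
// `Abi::Aggregate`) is passed in a single integer register via
//
//     arg.cast_to(Reg { kind: RegKind::Integer, size: Size::from_bytes(4) });
//
// while an unsized aggregate, or one larger than a pointer (e.g. `[u8; 32]`),
// is made indirect instead.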
3270
3271 fn make_thin_self_ptr<'tcx>(
3272     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3273     layout: TyAndLayout<'tcx>,
3274 ) -> TyAndLayout<'tcx> {
3275     let tcx = cx.tcx();
3276     let fat_pointer_ty = if layout.is_unsized() {
3277         // unsized `self` is passed as a pointer to `self`
3278         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3279         tcx.mk_mut_ptr(layout.ty)
3280     } else {
3281         match layout.abi {
3282             Abi::ScalarPair(..) => (),
3283             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3284         }
3285
3286         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3287         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3288         // elsewhere in the compiler as a method on a `dyn Trait`.
3289         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3290         // get a built-in pointer type
3291         let mut fat_pointer_layout = layout;
3292         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3293             && !fat_pointer_layout.ty.is_region_ptr()
3294         {
3295             for i in 0..fat_pointer_layout.fields.count() {
3296                 let field_layout = fat_pointer_layout.field(cx, i);
3297
3298                 if !field_layout.is_zst() {
3299                     fat_pointer_layout = field_layout;
3300                     continue 'descend_newtypes;
3301                 }
3302             }
3303
3304             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3305         }
3306
3307         fat_pointer_layout.ty
3308     };
3309
3310     // We now have a type like `*mut RcBox<dyn Trait>`.
3311     // Change its layout to that of `*mut ()`, a thin pointer, but keep the same type;
3312     // this is understood as a special case elsewhere in the compiler.
3313     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3314
3315     TyAndLayout {
3316         ty: fat_pointer_ty,
3317
3318         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3319         // should always work because the type is always `*mut ()`.
3320         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3321     }
3322 }
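// A hedged illustration of `make_thin_self_ptr` (not compiler code): for a
// `self: Rc<dyn Trait>` receiver in a virtual call, the receiver's layout is
// a `ScalarPair` (data pointer + vtable pointer). The function above descends
// through `Rc`'s newtype fields to find the `*mut RcBox<dyn Trait>` pointer and
// returns its type paired with the layout of `*mut ()`, so codegen passes only
// the thin data pointer to the virtual shim.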