use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
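    ///
    /// Illustrative example (not from the original source): a repr(Rust) enum
    /// whose discriminants span `-1..=100` yields `(I8, true)`, while adding
    /// `#[repr(u16)]` to an enum spanning `0..=3` forces `(I16, false)`.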
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
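        // Illustrative: with min = -1, `min as u128` is `u128::MAX`, so
        // `unsigned_fit` below becomes I128; an unsigned `#[repr]` hint
        // smaller than u128 would then trip the "hint too small" check.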
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, but it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none.
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
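// Illustrative: for `&[u8]`, field `FAT_PTR_ADDR` is the data pointer and
// field `FAT_PTR_EXTRA` is the `usize` length; for `&dyn Trait`, the extra
// field is the vtable pointer.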

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
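/// * A 4-bit log2 therefore allows lane counts up to 2^15, which is the
///   `1 << 0xF` (= 32768) value below.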
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

/// Enforce some basic invariants on layouts.
fn sanity_check_layout<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    layout: &TyAndLayout<'tcx>,
) {
    // Type-level uninhabitedness should always imply ABI uninhabitedness.
    if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
        assert!(layout.abi.is_uninhabited());
    }

    if cfg!(debug_assertions) {
        fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
            match layout.abi() {
                Abi::Scalar(_scalar) => {
                    // No padding in scalars.
                    /* FIXME(#96185):
                    assert_eq!(
                        layout.align().abi,
                        scalar.align(&tcx).abi,
                        "alignment mismatch between ABI and layout in {layout:#?}"
                    );
                    assert_eq!(
                        layout.size(),
                        scalar.size(&tcx),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );*/
                }
                Abi::Vector { count, element } => {
                    // No padding in vectors. Alignment can be strengthened, though.
                    assert!(
                        layout.align().abi >= element.align(&tcx).abi,
                        "alignment mismatch between ABI and layout in {layout:#?}"
                    );
                    let size = element.size(&tcx) * count;
                    assert_eq!(
                        layout.size(),
                        size.align_to(tcx.data_layout().vector_align(size).abi),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );
                }
                Abi::ScalarPair(scalar1, scalar2) => {
                    // Sanity-check scalar pairs. These are a bit more flexible and support
                    // padding, but we can at least ensure both fields actually fit into the layout
                    // and the alignment requirement has not been weakened.
                    let align1 = scalar1.align(&tcx).abi;
                    let align2 = scalar2.align(&tcx).abi;
                    assert!(
                        layout.align().abi >= cmp::max(align1, align2),
                        "alignment mismatch between ABI and layout in {layout:#?}",
                    );
                    let field2_offset = scalar1.size(&tcx).align_to(align2);
                    assert!(
                        layout.size() >= field2_offset + scalar2.size(&tcx),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );
                }
                Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
            }
        }

        check_layout_abi(tcx, layout.layout);

        if let Variants::Multiple { variants, .. } = &layout.variants {
            for variant in variants {
                check_layout_abi(tcx, *variant);
                // No nested "multiple".
                assert!(matches!(variant.variants(), Variants::Single { .. }));
                // Skip empty variants.
                if variant.size() == Size::ZERO
                    || variant.fields().count() == 0
                    || variant.abi().is_uninhabited()
                {
                    // These are never actually accessed anyway, so we can skip them. (Note that
                    // sometimes, variants with fields have size 0, and sometimes, variants without
                    // fields have non-0 size.)
                    continue;
                }
                // Variants should have the same or a smaller size as the full thing.
                if variant.size() > layout.size {
                    bug!(
                        "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
                        layout.size.bytes(),
                        variant.size().bytes(),
                    )
                }
                // The top-level ABI and the ABI of the variants should be coherent.
                let abi_coherent = match (layout.abi, variant.abi()) {
                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
                    (Abi::Uninhabited, _) => true,
                    (Abi::Aggregate { .. }, _) => true,
                    _ => false,
                };
                if !abi_coherent {
                    bug!(
                        "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
                        variant
                    );
                }
            }
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            sanity_check_layout(tcx, param_env, &layout);

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
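// Illustrative: invert_mapping(&[2, 0, 1]) == vec![1, 2, 0], since map[1] = 0,
// map[2] = 1, and map[0] = 2.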
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
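        // On a tie, that means `a`'s niche (at offset zero) is the one kept.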
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then inverse_memory_index[0] is 5,
        // and memory_index[5] should therefore be 0.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
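        // Illustrative: a newtype such as `struct Wrapper(u64);` inherits
        // `u64`'s `Abi::Scalar` here (when layout optimization is allowed),
        // and a struct whose two non-ZST fields are both scalars can become
        // an `Abi::ScalarPair`.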
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
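                // Function pointers are never null, so excluding 0 from the
                // valid range both documents that and creates a niche.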
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
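                //
                // Illustrative: `#[repr(simd)] struct S([f32; 4]);` yields
                // `(f32, 4, true)` below, while
                // `#[repr(simd)] struct S(f32, f32, f32, f32);` yields
                // `(f32, 4, false)`.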
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15), a fixed
                // maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
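                // Illustrative: in `enum E { A(u8, Infallible), B }`, variant
                // `A` is uninhabited but its `u8` field is not a ZST, so `A`
                // is still "present" and gets space for partial initialization.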
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can
                    // compute the field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that
                            // would probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
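                // Illustrative (RFC #2363 syntax): `enum E { A(u8) = 7, B = 0 }`
                // has discriminants that no longer match the variant indices,
                // so the check below excludes it.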
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
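                // Illustrative: `Option<&T>` encodes `None` in the reference's
                // forbidden null value, so the whole enum is pointer-sized and
                // needs no separate tag field.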
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // Only the niche is guaranteed to be initialised,
                                        // so use union layout for the other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, second.to_union())
                                        } else {
                                            Abi::ScalarPair(first.to_union(), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
1262                 }
1263
1264                 let (mut min, mut max) = (i128::MAX, i128::MIN);
1265                 let discr_type = def.repr().discr_type();
1266                 let bits = Integer::from_attr(self, discr_type).size().bits();
1267                 for (i, discr) in def.discriminants(tcx) {
1268                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1269                         continue;
1270                     }
1271                     let mut x = discr.val as i128;
1272                     if discr_type.is_signed() {
1273                         // sign extend the raw representation to be an i128
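                             // (e.g. with `bits == 8`, a raw value of 0xFF sign-extends to -1)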
1274                         x = (x << (128 - bits)) >> (128 - bits);
1275                     }
1276                     if x < min {
1277                         min = x;
1278                     }
1279                     if x > max {
1280                         max = x;
1281                     }
1282                 }
1283                 // We might have no inhabited variants, so pretend there's at least one.
1284                 if (min, max) == (i128::MAX, i128::MIN) {
1285                     min = 0;
1286                     max = 0;
1287                 }
1288                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1289                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1290
1291                 let mut align = dl.aggregate_align;
1292                 let mut size = Size::ZERO;
1293
1294                 // We're interested in the smallest alignment, so start large.
1295                 let mut start_align = Align::from_bytes(256).unwrap();
1296                 assert_eq!(Integer::for_align(dl, start_align), None);
1297
1298                 // repr(C) on an enum tells us to make a (tag, union) layout,
1299                 // so we need to grow the prefix alignment to be at least
1300                 // the alignment of the union. (This value is used both for
1301                 // determining the alignment of the overall enum, and for
1302                 // determining the alignment of the payload after the tag.)
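                     // Illustrative: `#[repr(C)] enum E { A(u8), B(u64) }` is laid out like
                     // `(tag, union { A(u8), B(u64) })`, so the payload after the tag must
                     // respect the union's alignment (that of `u64` here).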
1303                 let mut prefix_align = min_ity.align(dl).abi;
1304                 if def.repr().c() {
1305                     for fields in &variants {
1306                         for field in fields {
1307                             prefix_align = prefix_align.max(field.align.abi);
1308                         }
1309                     }
1310                 }
1311
1312                 // Create the set of structs that represent each variant.
1313                 let mut layout_variants = variants
1314                     .iter_enumerated()
1315                     .map(|(i, field_layouts)| {
1316                         let mut st = self.univariant_uninterned(
1317                             ty,
1318                             &field_layouts,
1319                             &def.repr(),
1320                             StructKind::Prefixed(min_ity.size(), prefix_align),
1321                         )?;
1322                         st.variants = Variants::Single { index: i };
1323                         // Find the first field we can't move later
1324                         // to make room for a larger discriminant.
1325                         for field in
1326                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1327                         {
1328                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1329                                 start_align = start_align.min(field.align.abi);
1330                                 break;
1331                             }
1332                         }
1333                         size = cmp::max(size, st.size);
1334                         align = align.max(st.align);
1335                         Ok(st)
1336                     })
1337                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1338
1339                 // Align the maximum variant size to the largest alignment.
1340                 size = size.align_to(align.abi);
1341
1342                 if size.bytes() >= dl.obj_size_bound() {
1343                     return Err(LayoutError::SizeOverflow(ty));
1344                 }
1345
1346                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1347                 if typeck_ity < min_ity {
1348                     // It is a bug if, at this point, layout decided on a larger discriminant
1349                     // size than typeck did (based on the values the discriminant can take on),
1350                     // mostly because the discriminant will be loaded and then stored into a
1351                     // variable of the type computed by typeck. Consider such a case (a bug):
1352                     // typeck decided on a byte-sized discriminant, but layout thinks 16 bits
1353                     // are needed to store all the discriminant values. Then, in codegen,
1354                     // storing this 16-bit discriminant into an 8-bit temporary would have to
1355                     // discard some of the bits needed to represent it (or layout is wrong in
1356                     // thinking it needs 16 bits).
1357                     bug!(
1358                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1359                         min_ity,
1360                         typeck_ity
1361                     );
1362                     // However, it is fine to make the discr type arbitrarily large (as an optimisation)
1363                     // after this point – we'll just truncate the value we load in codegen.
1364                 }
1365
1366                 // Check to see if we should use a different type for the
1367                 // discriminant. We can safely use a type with the same size
1368                 // as the alignment of the first field of each variant.
1369                 // We increase the size of the discriminant to keep LLVM from
1370                 // copying padding when it doesn't need to; such copying normally
1371                 // causes unaligned loads/stores and excessive memcpy/memset
1372                 // operations. By using a bigger integer size, LLVM can be sure
1373                 // about the contents and won't be so conservative.
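                     // Illustrative: for `enum E { A(u32), B(u32) }`, the value range alone
                     // needs only an 8-bit tag, but each variant's first field is 4-byte
                     // aligned, so the tag can be widened to 32 bits without growing the enum.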
1374
1375                 // Use the initial field alignment
1376                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1377                     min_ity
1378                 } else {
1379                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1380                 };
1381
1382                 // If the alignment-derived integer type is no larger than the minimal
1383                 // discriminant type, just keep the minimal one.
1384                 if ity <= min_ity {
1385                     ity = min_ity;
1386                 } else {
1387                     // Patch up the variants' first few fields.
1388                     let old_ity_size = min_ity.size();
1389                     let new_ity_size = ity.size();
1390                     for variant in &mut layout_variants {
1391                         match variant.fields {
1392                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1393                                 for i in offsets {
1394                                     if *i <= old_ity_size {
1395                                         assert_eq!(*i, old_ity_size);
1396                                         *i = new_ity_size;
1397                                     }
1398                                 }
1399                                 // We might be making the struct larger.
1400                                 if variant.size <= old_ity_size {
1401                                     variant.size = new_ity_size;
1402                                 }
1403                             }
1404                             _ => bug!(),
1405                         }
1406                     }
1407                 }
1408
1409                 let tag_mask = ity.size().unsigned_int_max();
1410                 let tag = Scalar::Initialized {
1411                     value: Int(ity, signed),
1412                     valid_range: WrappingRange {
1413                         start: (min as u128 & tag_mask),
1414                         end: (max as u128 & tag_mask),
1415                     },
1416                 };
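                     // Illustrative: with `ity == I8` and discriminants -1..=1, the valid
                     // range wraps around: `start == 0xFF`, `end == 0x01`.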
1417                 let mut abi = Abi::Aggregate { sized: true };
1418
1419                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1420                     abi = Abi::Uninhabited;
1421                 } else if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1422                     // Without the latter check, aligned enums with custom discriminant values
1423                     // would result in an ICE; see issue #92464 for more info.
1424                     abi = Abi::Scalar(tag);
1425                 } else {
1426                     // Try to use a ScalarPair for all tagged enums.
1427                     let mut common_prim = None;
1428                     let mut common_prim_initialized_in_all_variants = true;
1429                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1430                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1431                             bug!();
1432                         };
1433                         let mut fields =
1434                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1435                         let (field, offset) = match (fields.next(), fields.next()) {
1436                             (None, None) => {
1437                                 common_prim_initialized_in_all_variants = false;
1438                                 continue;
1439                             }
1440                             (Some(pair), None) => pair,
1441                             _ => {
1442                                 common_prim = None;
1443                                 break;
1444                             }
1445                         };
1446                         let prim = match field.abi {
1447                             Abi::Scalar(scalar) => {
1448                                 common_prim_initialized_in_all_variants &=
1449                                     matches!(scalar, Scalar::Initialized { .. });
1450                                 scalar.primitive()
1451                             }
1452                             _ => {
1453                                 common_prim = None;
1454                                 break;
1455                             }
1456                         };
1457                         if let Some(pair) = common_prim {
1458                             // This is pretty conservative. We could go fancier
1459                             // by conflating things like i32 and u32, or even
1460                             // realising that (u8, u8) could just cohabit with
1461                             // u16 or even u32.
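                                 // (e.g. `u32` and `i32` already count as different
                                 // primitives here, since signedness is part of `Int`.)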
1462                             if pair != (prim, offset) {
1463                                 common_prim = None;
1464                                 break;
1465                             }
1466                         } else {
1467                             common_prim = Some((prim, offset));
1468                         }
1469                     }
1470                     if let Some((prim, offset)) = common_prim {
1471                         let prim_scalar = if common_prim_initialized_in_all_variants {
1472                             scalar_unit(prim)
1473                         } else {
1474                             // Common prim might be uninit.
1475                             Scalar::Union { value: prim }
1476                         };
1477                         let pair = self.scalar_pair(tag, prim_scalar);
1478                         let pair_offsets = match pair.fields {
1479                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1480                                 assert_eq!(memory_index, &[0, 1]);
1481                                 offsets
1482                             }
1483                             _ => bug!(),
1484                         };
1485                         if pair_offsets[0] == Size::ZERO
1486                             && pair_offsets[1] == *offset
1487                             && align == pair.align
1488                             && size == pair.size
1489                         {
1490                             // We can use `ScalarPair` only when it matches our
1491                             // already computed layout (including `#[repr(C)]`).
1492                             abi = pair.abi;
1493                         }
1494                     }
1495                 }
1496
1497                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1498                 // variants to ensure they are consistent. This is because a downcast is
1499                 // semantically a NOP, and thus should not affect layout.
1500                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1501                     for variant in &mut layout_variants {
1502                         // We only do this for variants with fields; the others are not accessed anyway.
1503                         // Also do not overwrite any already existing "clever" ABIs.
1504                         if variant.fields.count() > 0
1505                             && matches!(variant.abi, Abi::Aggregate { .. })
1506                         {
1507                             variant.abi = abi;
1508                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1509                             variant.size = cmp::max(variant.size, size);
1510                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1511                         }
1512                     }
1513                 }
1514
1515                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1516
1517                 let layout_variants =
1518                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1519
1520                 let tagged_layout = LayoutS {
1521                     variants: Variants::Multiple {
1522                         tag,
1523                         tag_encoding: TagEncoding::Direct,
1524                         tag_field: 0,
1525                         variants: layout_variants,
1526                     },
1527                     fields: FieldsShape::Arbitrary {
1528                         offsets: vec![Size::ZERO],
1529                         memory_index: vec![0],
1530                     },
1531                     largest_niche,
1532                     abi,
1533                     align,
1534                     size,
1535                 };
1536
1537                 let best_layout = match (tagged_layout, niche_filling_layout) {
1538                     (tagged_layout, Some(niche_filling_layout)) => {
1539                         // Pick the smaller layout; otherwise,
1540                         // pick the layout with the larger niche; otherwise,
1541                         // pick tagged as it has simpler codegen.
1542                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1543                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1544                             (layout.size, cmp::Reverse(niche_size))
1545                         })
1546                     }
1547                     (tagged_layout, None) => tagged_layout,
1548                 };
1549
1550                 tcx.intern_layout(best_layout)
1551             }
1552
1553             // Types with no meaningful known layout.
1554             ty::Projection(_) | ty::Opaque(..) => {
1555                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1556                 // if that was possible, so there's no reason to try again here.
1557                 return Err(LayoutError::Unknown(ty));
1558             }
1559
1560             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1561                 bug!("Layout::compute: unexpected type `{}`", ty)
1562             }
1563
1564             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1565                 return Err(LayoutError::Unknown(ty));
1566             }
1567         })
1568     }
1569 }
1570
1571 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1572 #[derive(Clone, Debug, PartialEq)]
1573 enum SavedLocalEligibility {
1574     Unassigned,
1575     Assigned(VariantIdx),
1576     // FIXME: Use newtype_index so we aren't wasting bytes
1577     Ineligible(Option<u32>),
1578 }
1579
1580 // When laying out generators, we divide our saved local fields into two
1581 // categories: overlap-eligible and overlap-ineligible.
1582 //
1583 // Those fields which are ineligible for overlap go in a "prefix" at the
1584 // beginning of the layout, and always have space reserved for them.
1585 //
1586 // Overlap-eligible fields are only assigned to one variant, so we lay
1587 // those fields out for each variant and put them right after the
1588 // prefix.
1589 //
1590 // Finally, in the layout details, we point to the fields from the
1591 // variants they are assigned to. It is possible for some fields to be
1592 // included in multiple variants. No field ever "moves around" in the
1593 // layout; its offset is always the same.
1594 //
1595 // Also included in the layout are the upvars and the discriminant.
1596 // These are included as fields on the "outer" layout; they are not part
1597 // of any variant.
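     //
     // Illustrative sketch: if local `a` is live only across the first yield and
     // `b` only across the second, each is assigned to a single variant and the
     // two may share an offset, while a local live across both yields is
     // ineligible and gets a fixed slot in the prefix.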
1598 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1599     /// Compute the eligibility and assignment of each local.
1600     fn generator_saved_local_eligibility(
1601         &self,
1602         info: &GeneratorLayout<'tcx>,
1603     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1604         use SavedLocalEligibility::*;
1605
1606         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1607             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1608
1609         // The saved locals not eligible for overlap. These will get
1610         // "promoted" to the prefix of our generator.
1611         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1612
1613         // Figure out which of our saved locals are fields in only
1614         // one variant. The rest are deemed ineligible for overlap.
1615         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1616             for local in fields {
1617                 match assignments[*local] {
1618                     Unassigned => {
1619                         assignments[*local] = Assigned(variant_index);
1620                     }
1621                     Assigned(idx) => {
1622                         // We've already seen this local at another suspension
1623                         // point, so it is no longer a candidate.
1624                         trace!(
1625                             "removing local {:?} in >1 variant ({:?}, {:?})",
1626                             local,
1627                             variant_index,
1628                             idx
1629                         );
1630                         ineligible_locals.insert(*local);
1631                         assignments[*local] = Ineligible(None);
1632                     }
1633                     Ineligible(_) => {}
1634                 }
1635             }
1636         }
1637
1638         // Next, check every pair of eligible locals to see if they
1639         // conflict.
1640         for local_a in info.storage_conflicts.rows() {
1641             let conflicts_a = info.storage_conflicts.count(local_a);
1642             if ineligible_locals.contains(local_a) {
1643                 continue;
1644             }
1645
1646             for local_b in info.storage_conflicts.iter(local_a) {
1647                 // local_a and local_b are storage live at the same time, therefore they
1648                 // cannot overlap in the generator layout. The only way to guarantee
1649                 // this is if they are in the same variant, or one is ineligible
1650                 // (which means it is stored in every variant).
1651                 if ineligible_locals.contains(local_b)
1652                     || assignments[local_a] == assignments[local_b]
1653                 {
1654                     continue;
1655                 }
1656
1657                 // If they conflict, we will choose one to make ineligible.
1658                 // This is not always optimal; it's just a greedy heuristic that
1659                 // seems to produce good results most of the time.
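                     // (e.g. if `local_a` conflicts with five locals and `local_b` with
                     // two, `local_a` is made ineligible, keeping the less-conflicted
                     // local available for overlap.)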
1660                 let conflicts_b = info.storage_conflicts.count(local_b);
1661                 let (remove, other) =
1662                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1663                 ineligible_locals.insert(remove);
1664                 assignments[remove] = Ineligible(None);
1665                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1666             }
1667         }
1668
1669         // Count the number of variants in use. If only one is used, it is
1670         // impossible to overlap any locals in our layout. In this case it's
1671         // always better to make the remaining locals ineligible, so we can
1672         // lay them out with the other locals in the prefix and eliminate
1673         // unnecessary padding bytes.
1674         {
1675             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1676             for assignment in &assignments {
1677                 if let Assigned(idx) = assignment {
1678                     used_variants.insert(*idx);
1679                 }
1680             }
1681             if used_variants.count() < 2 {
1682                 for assignment in assignments.iter_mut() {
1683                     *assignment = Ineligible(None);
1684                 }
1685                 ineligible_locals.insert_all();
1686             }
1687         }
1688
1689         // Write down the order of our locals that will be promoted to the prefix.
1690         {
1691             for (idx, local) in ineligible_locals.iter().enumerate() {
1692                 assignments[local] = Ineligible(Some(idx as u32));
1693             }
1694         }
1695         debug!("generator saved local assignments: {:?}", assignments);
1696
1697         (ineligible_locals, assignments)
1698     }
1699
1700     /// Compute the full generator layout.
1701     fn generator_layout(
1702         &self,
1703         ty: Ty<'tcx>,
1704         def_id: hir::def_id::DefId,
1705         substs: SubstsRef<'tcx>,
1706     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1707         use SavedLocalEligibility::*;
1708         let tcx = self.tcx;
1709         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1710
1711         let Some(info) = tcx.generator_layout(def_id) else {
1712             return Err(LayoutError::Unknown(ty));
1713         };
1714         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1715
1716         // Build a prefix layout, including "promoting" all ineligible
1717         // locals as part of the prefix. We compute the layout of all of
1718         // these fields at once to get optimal packing.
1719         let tag_index = substs.as_generator().prefix_tys().count();
1720
1721         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1722         let max_discr = (info.variant_fields.len() - 1) as u128;
1723         let discr_int = Integer::fit_unsigned(max_discr);
1724         let discr_int_ty = discr_int.to_ty(tcx, false);
1725         let tag = Scalar::Initialized {
1726             value: Primitive::Int(discr_int, false),
1727             valid_range: WrappingRange { start: 0, end: max_discr },
1728         };
1729         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1730         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1731
1732         let promoted_layouts = ineligible_locals
1733             .iter()
1734             .map(|local| subst_field(info.field_tys[local]))
1735             .map(|ty| tcx.mk_maybe_uninit(ty))
1736             .map(|ty| self.layout_of(ty));
1737         let prefix_layouts = substs
1738             .as_generator()
1739             .prefix_tys()
1740             .map(|ty| self.layout_of(ty))
1741             .chain(iter::once(Ok(tag_layout)))
1742             .chain(promoted_layouts)
1743             .collect::<Result<Vec<_>, _>>()?;
1744         let prefix = self.univariant_uninterned(
1745             ty,
1746             &prefix_layouts,
1747             &ReprOptions::default(),
1748             StructKind::AlwaysSized,
1749         )?;
1750
1751         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1752
1753         // Split the prefix layout into the "outer" fields (upvars and
1754         // discriminant) and the "promoted" fields. Promoted fields will
1755         // get included in each variant that requested them in
1756         // GeneratorLayout.
1757         debug!("prefix = {:#?}", prefix);
1758         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1759             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1760                 let mut inverse_memory_index = invert_mapping(&memory_index);
1761
1762                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1763                 // "outer" and "promoted" fields respectively.
1764                 let b_start = (tag_index + 1) as u32;
1765                 let offsets_b = offsets.split_off(b_start as usize);
1766                 let offsets_a = offsets;
1767
1768                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1769                 // by preserving the order but keeping only one disjoint "half" each.
1770                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
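                     // Illustrative: with `memory_index == [2, 0, 3, 1]` and `b_start == 2`,
                     // `inverse_memory_index == [1, 3, 0, 2]`; the "b" half keeps entries >= 2
                     // shifted down to `[1, 0]`, and the "a" half retains entries < 2, also
                     // `[1, 0]`, each a valid permutation of its own fields.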
1771                 let inverse_memory_index_b: Vec<_> =
1772                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1773                 inverse_memory_index.retain(|&i| i < b_start);
1774                 let inverse_memory_index_a = inverse_memory_index;
1775
1776                 // Since `inverse_memory_index_{a,b}` each only refer to their
1777                 // respective fields, they can be safely inverted
1778                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1779                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1780
1781                 let outer_fields =
1782                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1783                 (outer_fields, offsets_b, memory_index_b)
1784             }
1785             _ => bug!(),
1786         };
1787
1788         let mut size = prefix.size;
1789         let mut align = prefix.align;
1790         let variants = info
1791             .variant_fields
1792             .iter_enumerated()
1793             .map(|(index, variant_fields)| {
1794                 // Only include overlap-eligible fields when we compute our variant layout.
1795                 let variant_only_tys = variant_fields
1796                     .iter()
1797                     .filter(|local| match assignments[**local] {
1798                         Unassigned => bug!(),
1799                         Assigned(v) if v == index => true,
1800                         Assigned(_) => bug!("assignment does not match variant"),
1801                         Ineligible(_) => false,
1802                     })
1803                     .map(|local| subst_field(info.field_tys[*local]));
1804
1805                 let mut variant = self.univariant_uninterned(
1806                     ty,
1807                     &variant_only_tys
1808                         .map(|ty| self.layout_of(ty))
1809                         .collect::<Result<Vec<_>, _>>()?,
1810                     &ReprOptions::default(),
1811                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1812                 )?;
1813                 variant.variants = Variants::Single { index };
1814
1815                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1816                     bug!();
1817                 };
1818
1819                 // Now, stitch the promoted and variant-only fields back together in
1820                 // the order they are mentioned by our GeneratorLayout.
1821                 // Because we only use some subset (that can differ between variants)
1822                 // of the promoted fields, we can't just pick those elements of the
1823                 // `promoted_memory_index` (as we'd end up with gaps).
1824                 // So instead, we build an "inverse memory_index", as if all of the
1825                 // promoted fields were being used, but leave the elements not in the
1826                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1827                 // obtain a valid (bijective) mapping.
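                     // (e.g. with two promoted fields of which this variant uses only the
                     // second, the second's slot gets a real index, the first stays
                     // `INVALID_FIELD_IDX`, and the `retain` below drops it before inverting.)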
1828                 const INVALID_FIELD_IDX: u32 = !0;
1829                 let mut combined_inverse_memory_index =
1830                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1831                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1832                 let combined_offsets = variant_fields
1833                     .iter()
1834                     .enumerate()
1835                     .map(|(i, local)| {
1836                         let (offset, memory_index) = match assignments[*local] {
1837                             Unassigned => bug!(),
1838                             Assigned(_) => {
1839                                 let (offset, memory_index) =
1840                                     offsets_and_memory_index.next().unwrap();
1841                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1842                             }
1843                             Ineligible(field_idx) => {
1844                                 let field_idx = field_idx.unwrap() as usize;
1845                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1846                             }
1847                         };
1848                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1849                         offset
1850                     })
1851                     .collect();
1852
1853                 // Remove the unused slots and invert the mapping to obtain the
1854                 // combined `memory_index` (also see previous comment).
1855                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1856                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1857
1858                 variant.fields = FieldsShape::Arbitrary {
1859                     offsets: combined_offsets,
1860                     memory_index: combined_memory_index,
1861                 };
1862
1863                 size = size.max(variant.size);
1864                 align = align.max(variant.align);
1865                 Ok(tcx.intern_layout(variant))
1866             })
1867             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1868
1869         size = size.align_to(align.abi);
1870
1871         let abi =
1872             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1873                 Abi::Uninhabited
1874             } else {
1875                 Abi::Aggregate { sized: true }
1876             };
1877
1878         let layout = tcx.intern_layout(LayoutS {
1879             variants: Variants::Multiple {
1880                 tag,
1881                 tag_encoding: TagEncoding::Direct,
1882                 tag_field: tag_index,
1883                 variants,
1884             },
1885             fields: outer_fields,
1886             abi,
1887             largest_niche: prefix.largest_niche,
1888             size,
1889             align,
1890         });
1891         debug!("generator layout ({:?}): {:#?}", ty, layout);
1892         Ok(layout)
1893     }
1894
1895     /// This is invoked by the `layout_of` query to record the final
1896     /// layout of each type.
1897     #[inline(always)]
1898     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1899         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1900         // for dumping later.
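             // (Illustrative: these records become the `print-type-size` lines the
             // session emits from its collected `VariantInfo`/`FieldInfo` values.)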
1901         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1902             self.record_layout_for_printing_outlined(layout)
1903         }
1904     }
1905
1906     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1907         // Ignore layouts computed with non-empty param environments, and
1908         // non-monomorphic layouts, as the user only wants to see what
1909         // results from the final codegen session.
1910         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1911             return;
1912         }
1913
1914         // (delay format until we actually need it)
1915         let record = |kind, packed, opt_discr_size, variants| {
1916             let type_desc = format!("{:?}", layout.ty);
1917             self.tcx.sess.code_stats.record_type_size(
1918                 kind,
1919                 type_desc,
1920                 layout.align.abi,
1921                 layout.size,
1922                 packed,
1923                 opt_discr_size,
1924                 variants,
1925             );
1926         };
1927
1928         let adt_def = match *layout.ty.kind() {
1929             ty::Adt(ref adt_def, _) => {
1930                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1931                 adt_def
1932             }
1933
1934             ty::Closure(..) => {
1935                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1936                 record(DataTypeKind::Closure, false, None, vec![]);
1937                 return;
1938             }
1939
1940             _ => {
1941                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1942                 return;
1943             }
1944         };
1945
1946         let adt_kind = adt_def.adt_kind();
1947         let adt_packed = adt_def.repr().pack.is_some();
1948
1949         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1950             let mut min_size = Size::ZERO;
1951             let field_info: Vec<_> = flds
1952                 .iter()
1953                 .enumerate()
1954                 .map(|(i, &name)| {
1955                     let field_layout = layout.field(self, i);
1956                     let offset = layout.fields.offset(i);
1957                     let field_end = offset + field_layout.size;
1958                     if min_size < field_end {
1959                         min_size = field_end;
1960                     }
1961                     FieldInfo {
1962                         name: name.to_string(),
1963                         offset: offset.bytes(),
1964                         size: field_layout.size.bytes(),
1965                         align: field_layout.align.abi.bytes(),
1966                     }
1967                 })
1968                 .collect();
1969
1970             VariantInfo {
1971                 name: n.map(|n| n.to_string()),
1972                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1973                 align: layout.align.abi.bytes(),
1974                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1975                 fields: field_info,
1976             }
1977         };
1978
1979         match layout.variants {
1980             Variants::Single { index } => {
1981                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1982                     debug!(
1983                         "print-type-size `{:#?}` variant {}",
1984                         layout,
1985                         adt_def.variant(index).name
1986                     );
1987                     let variant_def = &adt_def.variant(index);
1988                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1989                     record(
1990                         adt_kind.into(),
1991                         adt_packed,
1992                         None,
1993                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1994                     );
1995                 } else {
1996                     // (This case arises for *empty* enums, so give it
1997                     // zero variants.)
1998                     record(adt_kind.into(), adt_packed, None, vec![]);
1999                 }
2000             }
2001
2002             Variants::Multiple { tag, ref tag_encoding, .. } => {
2003                 debug!(
2004                     "print-type-size `{:#?}` adt general variants def {}",
2005                     layout.ty,
2006                     adt_def.variants().len()
2007                 );
2008                 let variant_infos: Vec<_> = adt_def
2009                     .variants()
2010                     .iter_enumerated()
2011                     .map(|(i, variant_def)| {
2012                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2013                         build_variant_info(
2014                             Some(variant_def.name),
2015                             &fields,
2016                             layout.for_variant(self, i),
2017                         )
2018                     })
2019                     .collect();
2020                 record(
2021                     adt_kind.into(),
2022                     adt_packed,
2023                     match tag_encoding {
2024                         TagEncoding::Direct => Some(tag.size(self)),
2025                         _ => None,
2026                     },
2027                     variant_infos,
2028                 );
2029             }
2030         }
2031     }
2032 }
2033
2034 /// Type size "skeleton", i.e., the only information determining a type's size.
2035 /// While this is conservative (aside from constant sizes, only pointers,
2036 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
2037 /// enough to statically check common use cases of transmute.
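     ///
     /// For example, `&T` and `Option<&T>` with `T` a type parameter both reduce
     /// to `SizeSkeleton::Pointer` with the same `tail`, so a transmute between
     /// them can be accepted without knowing `T`'s concrete layout.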
2038 #[derive(Copy, Clone, Debug)]
2039 pub enum SizeSkeleton<'tcx> {
2040     /// Any statically computable Layout.
2041     Known(Size),
2042
2043     /// A potentially-fat pointer.
2044     Pointer {
2045         /// If true, this pointer is never null.
2046         non_zero: bool,
2047         /// The type which determines the unsized metadata, if any,
2048         /// of this pointer. Either a type parameter or a projection
2049         /// depending on one, with regions erased.
2050         tail: Ty<'tcx>,
2051     },
2052 }
2053
2054 impl<'tcx> SizeSkeleton<'tcx> {
2055     pub fn compute(
2056         ty: Ty<'tcx>,
2057         tcx: TyCtxt<'tcx>,
2058         param_env: ty::ParamEnv<'tcx>,
2059     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2060         debug_assert!(!ty.has_infer_types_or_consts());
2061
2062         // First try computing a static layout.
2063         let err = match tcx.layout_of(param_env.and(ty)) {
2064             Ok(layout) => {
2065                 return Ok(SizeSkeleton::Known(layout.size));
2066             }
2067             Err(err) => err,
2068         };
2069
2070         match *ty.kind() {
2071             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2072                 let non_zero = !ty.is_unsafe_ptr();
2073                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2074                 match tail.kind() {
2075                     ty::Param(_) | ty::Projection(_) => {
2076                         debug_assert!(tail.has_param_types_or_consts());
2077                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2078                     }
2079                     _ => bug!(
2080                         "SizeSkeleton::compute({}): layout errored ({}), yet \
2081                               tail `{}` is not a type parameter or a projection",
2082                         ty,
2083                         err,
2084                         tail
2085                     ),
2086                 }
2087             }
2088
2089             ty::Adt(def, substs) => {
2090                 // Only newtypes and enums w/ nullable pointer optimization.
2091                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2092                     return Err(err);
2093                 }
2094
2095                 // Get a zero-sized variant or a pointer newtype.
2096                 let zero_or_ptr_variant = |i| {
2097                     let i = VariantIdx::new(i);
2098                     let fields =
2099                         def.variant(i).fields.iter().map(|field| {
2100                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2101                         });
2102                     let mut ptr = None;
2103                     for field in fields {
2104                         let field = field?;
2105                         match field {
2106                             SizeSkeleton::Known(size) => {
2107                                 if size.bytes() > 0 {
2108                                     return Err(err);
2109                                 }
2110                             }
2111                             SizeSkeleton::Pointer { .. } => {
2112                                 if ptr.is_some() {
2113                                     return Err(err);
2114                                 }
2115                                 ptr = Some(field);
2116                             }
2117                         }
2118                     }
2119                     Ok(ptr)
2120                 };
2121
2122                 let v0 = zero_or_ptr_variant(0)?;
2123                 // Newtype.
2124                 if def.variants().len() == 1 {
2125                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2126                         return Ok(SizeSkeleton::Pointer {
2127                             non_zero: non_zero
2128                                 || match tcx.layout_scalar_valid_range(def.did()) {
2129                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2130                                     (Bound::Included(start), Bound::Included(end)) => {
2131                                         0 < start && start < end
2132                                     }
2133                                     _ => false,
2134                                 },
2135                             tail,
2136                         });
2137                     } else {
2138                         return Err(err);
2139                     }
2140                 }
2141
2142                 let v1 = zero_or_ptr_variant(1)?;
2143                 // Nullable pointer enum optimization.
2144                 match (v0, v1) {
2145                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2146                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2147                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2148                     }
2149                     _ => Err(err),
2150                 }
2151             }
2152
2153             ty::Projection(_) | ty::Opaque(..) => {
2154                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2155                 if ty == normalized {
2156                     Err(err)
2157                 } else {
2158                     SizeSkeleton::compute(normalized, tcx, param_env)
2159                 }
2160             }
2161
2162             _ => Err(err),
2163         }
2164     }
2165
2166     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2167         match (self, other) {
2168             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2169             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2170                 a == b
2171             }
2172             _ => false,
2173         }
2174     }
2175 }
2176
2177 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2178     fn tcx(&self) -> TyCtxt<'tcx>;
2179 }
2180
2181 pub trait HasParamEnv<'tcx> {
2182     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2183 }
2184
2185 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2186     #[inline]
2187     fn data_layout(&self) -> &TargetDataLayout {
2188         &self.data_layout
2189     }
2190 }
2191
2192 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2193     fn target_spec(&self) -> &Target {
2194         &self.sess.target
2195     }
2196 }
2197
2198 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2199     #[inline]
2200     fn tcx(&self) -> TyCtxt<'tcx> {
2201         *self
2202     }
2203 }
2204
2205 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2206     #[inline]
2207     fn data_layout(&self) -> &TargetDataLayout {
2208         &self.data_layout
2209     }
2210 }
2211
2212 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2213     fn target_spec(&self) -> &Target {
2214         &self.sess.target
2215     }
2216 }
2217
2218 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2219     #[inline]
2220     fn tcx(&self) -> TyCtxt<'tcx> {
2221         **self
2222     }
2223 }
2224
2225 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2226     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2227         self.param_env
2228     }
2229 }
2230
2231 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2232     fn data_layout(&self) -> &TargetDataLayout {
2233         self.tcx.data_layout()
2234     }
2235 }
2236
2237 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2238     fn target_spec(&self) -> &Target {
2239         self.tcx.target_spec()
2240     }
2241 }
2242
2243 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2244     fn tcx(&self) -> TyCtxt<'tcx> {
2245         self.tcx.tcx()
2246     }
2247 }
2248
2249 pub trait MaybeResult<T> {
2250     type Error;
2251
2252     fn from(x: Result<T, Self::Error>) -> Self;
2253     fn to_result(self) -> Result<T, Self::Error>;
2254 }
2255
2256 impl<T> MaybeResult<T> for T {
2257     type Error = !;
2258
2259     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2260         x
2261     }
2262     fn to_result(self) -> Result<T, Self::Error> {
2263         Ok(self)
2264     }
2265 }
2266
2267 impl<T, E> MaybeResult<T> for Result<T, E> {
2268     type Error = E;
2269
2270     fn from(x: Result<T, Self::Error>) -> Self {
2271         x
2272     }
2273     fn to_result(self) -> Result<T, Self::Error> {
2274         self
2275     }
2276 }
2277
2278 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2279
2280 /// Trait for contexts that want to be able to compute layouts of types.
2281 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2282 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2283     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2284     /// returned from `layout_of` (see also `handle_layout_err`).
2285     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2286
2287     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2288     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2289     #[inline]
2290     fn layout_tcx_at_span(&self) -> Span {
2291         DUMMY_SP
2292     }
2293
2294     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2295     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2296     ///
2297     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2298     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2299     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2300     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2301     fn handle_layout_err(
2302         &self,
2303         err: LayoutError<'tcx>,
2304         span: Span,
2305         ty: Ty<'tcx>,
2306     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2307 }
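
     // Illustrative: an infallible context can set `type LayoutOfResult =
     // TyAndLayout<'tcx>` and report a fatal error from `handle_layout_err`, so
     // its `layout_of` calls need no `Result` handling (see the docs above).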
2308
2309 /// Blanket extension trait for contexts that can compute layouts of types.
2310 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2311     /// Computes the layout of a type. Note that this implicitly
2312     /// executes in "reveal all" mode, and will normalize the input type.
2313     #[inline]
2314     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2315         self.spanned_layout_of(ty, DUMMY_SP)
2316     }
2317
2318     /// Computes the layout of a type, at `span`. Note that this implicitly
2319     /// executes in "reveal all" mode, and will normalize the input type.
2320     // FIXME(eddyb) avoid passing information like this, and instead add more
2321     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2322     #[inline]
2323     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2324         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2325         let tcx = self.tcx().at(span);
2326
2327         MaybeResult::from(
2328             tcx.layout_of(self.param_env().and(ty))
2329                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2330         )
2331     }
2332 }
2333
2334 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2335
2336 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2337     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2338
2339     #[inline]
2340     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2341         err
2342     }
2343 }
2344
2345 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2346     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2347
2348     #[inline]
2349     fn layout_tcx_at_span(&self) -> Span {
2350         self.tcx.span
2351     }
2352
2353     #[inline]
2354     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2355         err
2356     }
2357 }
2358
2359 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2360 where
2361     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2362 {
2363     fn ty_and_layout_for_variant(
2364         this: TyAndLayout<'tcx>,
2365         cx: &C,
2366         variant_index: VariantIdx,
2367     ) -> TyAndLayout<'tcx> {
2368         let layout = match this.variants {
2369             Variants::Single { index }
2370                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2371                 if index == variant_index &&
2372                 // Don't confuse variants of uninhabited enums with the enum itself.
2373                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2374                 this.fields != FieldsShape::Primitive =>
2375             {
2376                 this.layout
2377             }
2378
2379             Variants::Single { index } => {
2380                 let tcx = cx.tcx();
2381                 let param_env = cx.param_env();
2382
2383                 // Deny calling for_variant more than once for non-Single enums.
2384                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2385                     assert_eq!(original_layout.variants, Variants::Single { index });
2386                 }
2387
2388                 let fields = match this.ty.kind() {
2389                     ty::Adt(def, _) if def.variants().is_empty() =>
2390                         bug!("for_variant called on zero-variant enum"),
2391                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2392                     _ => bug!(),
2393                 };
2394                 tcx.intern_layout(LayoutS {
2395                     variants: Variants::Single { index: variant_index },
2396                     fields: match NonZeroUsize::new(fields) {
2397                         Some(fields) => FieldsShape::Union(fields),
2398                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2399                     },
2400                     abi: Abi::Uninhabited,
2401                     largest_niche: None,
2402                     align: tcx.data_layout.i8_align,
2403                     size: Size::ZERO,
2404                 })
2405             }
2406
2407             Variants::Multiple { ref variants, .. } => variants[variant_index],
2408         };
2409
2410         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2411
2412         TyAndLayout { ty: this.ty, layout }
2413     }
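
    // Example (illustrative only; `cx` and `opt_u32` are assumed to exist):
    // given the layout of `Option<u32>`, `for_variant` with the `Some` index
    // yields a variant layout whose single field is the `u32` payload:
    //
    //     let opt = cx.layout_of(opt_u32)?;
    //     let some = opt.for_variant(&cx, VariantIdx::from_u32(1));
    //     assert_eq!(some.fields.count(), 1);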
2414
2415     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2416         enum TyMaybeWithLayout<'tcx> {
2417             Ty(Ty<'tcx>),
2418             TyAndLayout(TyAndLayout<'tcx>),
2419         }
2420
2421         fn field_ty_or_layout<'tcx>(
2422             this: TyAndLayout<'tcx>,
2423             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2424             i: usize,
2425         ) -> TyMaybeWithLayout<'tcx> {
2426             let tcx = cx.tcx();
2427             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2428                 TyAndLayout {
2429                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2430                     ty: tag.primitive().to_ty(tcx),
2431                 }
2432             };
2433
2434             match *this.ty.kind() {
2435                 ty::Bool
2436                 | ty::Char
2437                 | ty::Int(_)
2438                 | ty::Uint(_)
2439                 | ty::Float(_)
2440                 | ty::FnPtr(_)
2441                 | ty::Never
2442                 | ty::FnDef(..)
2443                 | ty::GeneratorWitness(..)
2444                 | ty::Foreign(..)
2445                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2446
2447                 // Potentially-fat pointers.
2448                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2449                     assert!(i < this.fields.count());
2450
2451                     // Reuse the fat `*T` type as its own thin pointer data field.
2452                     // This provides information about, e.g., DST struct pointees
2453                     // (which may have no non-DST form), and will work as long
2454                     // as the `Abi` or `FieldsShape` is checked by users.
2455                     if i == 0 {
2456                         let nil = tcx.mk_unit();
2457                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2458                             tcx.mk_mut_ptr(nil)
2459                         } else {
2460                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2461                         };
2462
2463                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2464                         // the `Result` should always work because the type is
2465                         // always either `*mut ()` or `&'static mut ()`.
2466                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2467                             ty: this.ty,
2468                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2469                         });
2470                     }
2471
2472                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2473                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2474                         ty::Dynamic(_, _) => {
2475                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2476                                 tcx.lifetimes.re_static,
2477                                 tcx.mk_array(tcx.types.usize, 3),
2478                             ))
2479                             /* FIXME: use actual fn pointers
2480                             Warning: naively computing the number of entries in the
2481                             vtable by counting the methods on the trait + methods on
2482                             all parent traits does not work, because some methods can
2483                             be not object safe and thus excluded from the vtable.
2484                             Increase this counter if you tried to implement this but
2485                             failed to do it without duplicating a lot of code from
2486                             other places in the compiler: 2
2487                             tcx.mk_tup(&[
2488                                 tcx.mk_array(tcx.types.usize, 3),
2489                                 tcx.mk_array(Option<fn()>),
2490                             ])
2491                             */
2492                         }
2493                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2494                     }
2495                 }
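
                // Example (illustrative only; `cx` and `u8_slice_ref` are
                // assumed to exist): for `&[u8]`, field 0 is the thin data
                // pointer and field 1 is the `usize` length; for `&dyn Trait`
                // it would be the vtable reference instead:
                //
                //     let fat = cx.layout_of(u8_slice_ref)?;
                //     assert_eq!(fat.fields.count(), 2);
                //     assert_eq!(fat.field(&cx, 1).ty, cx.tcx().types.usize);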
2496
2497                 // Arrays and slices.
2498                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2499                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2500
2501                 // Tuples, generators and closures.
2502                 ty::Closure(_, ref substs) => field_ty_or_layout(
2503                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2504                     cx,
2505                     i,
2506                 ),
2507
2508                 ty::Generator(def_id, ref substs, _) => match this.variants {
2509                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2510                         substs
2511                             .as_generator()
2512                             .state_tys(def_id, tcx)
2513                             .nth(index.as_usize())
2514                             .unwrap()
2515                             .nth(i)
2516                             .unwrap(),
2517                     ),
2518                     Variants::Multiple { tag, tag_field, .. } => {
2519                         if i == tag_field {
2520                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2521                         }
2522                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2523                     }
2524                 },
2525
2526                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2527
2528                 // ADTs.
2529                 ty::Adt(def, substs) => {
2530                     match this.variants {
2531                         Variants::Single { index } => {
2532                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2533                         }
2534
2535                         // Discriminant field for enums (where applicable).
2536                         Variants::Multiple { tag, .. } => {
2537                             assert_eq!(i, 0);
2538                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2539                         }
2540                     }
2541                 }
2542
2543                 ty::Projection(_)
2544                 | ty::Bound(..)
2545                 | ty::Placeholder(..)
2546                 | ty::Opaque(..)
2547                 | ty::Param(_)
2548                 | ty::Infer(_)
2549                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2550             }
2551         }
2552
2553         match field_ty_or_layout(this, cx, i) {
2554             TyMaybeWithLayout::Ty(field_ty) => {
2555                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2556                     bug!(
2557                         "failed to get layout for `{}`: {},\n\
2558                          despite it being a field (#{}) of an existing layout: {:#?}",
2559                         field_ty,
2560                         e,
2561                         i,
2562                         this
2563                     )
2564                 })
2565             }
2566             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2567         }
2568     }
2569
2570     fn ty_and_layout_pointee_info_at(
2571         this: TyAndLayout<'tcx>,
2572         cx: &C,
2573         offset: Size,
2574     ) -> Option<PointeeInfo> {
2575         let tcx = cx.tcx();
2576         let param_env = cx.param_env();
2577
2578         let addr_space_of_ty = |ty: Ty<'tcx>| {
2579             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2580         };
2581
2582         let pointee_info = match *this.ty.kind() {
2583             ty::RawPtr(mt) if offset.bytes() == 0 => {
2584                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2585                     size: layout.size,
2586                     align: layout.align.abi,
2587                     safe: None,
2588                     address_space: addr_space_of_ty(mt.ty),
2589                 })
2590             }
2591             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2592                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2593                     size: layout.size,
2594                     align: layout.align.abi,
2595                     safe: None,
2596                     address_space: cx.data_layout().instruction_address_space,
2597                 })
2598             }
2599             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2600                 let address_space = addr_space_of_ty(ty);
2601                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2602                     // Use conservative pointer kind if not optimizing. This saves us the
2603                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2604                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2605                     PointerKind::Shared
2606                 } else {
2607                     match mt {
2608                         hir::Mutability::Not => {
2609                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2610                                 PointerKind::Frozen
2611                             } else {
2612                                 PointerKind::Shared
2613                             }
2614                         }
2615                         hir::Mutability::Mut => {
2616                             // References to self-referential structures should not be considered
2617                             // noalias, as another pointer to the structure can be obtained that
2618                             // is not based on the original reference. We consider all !Unpin
2619                             // types to be potentially self-referential here.
2620                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2621                                 PointerKind::UniqueBorrowed
2622                             } else {
2623                                 PointerKind::Shared
2624                             }
2625                         }
2626                     }
2627                 };
2628
2629                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2630                     size: layout.size,
2631                     align: layout.align.abi,
2632                     safe: Some(kind),
2633                     address_space,
2634                 })
2635             }
2636
2637             _ => {
2638                 let mut data_variant = match this.variants {
2639                     // Within the discriminant field, only the niche itself is
2640                     // always initialized, so we only check for a pointer at its
2641                     // offset.
2642                     //
2643                     // If the niche is a pointer, it's either valid (according
2644                     // to its type), or null (which the niche field's scalar
2645                     // validity range encodes).  This allows using
2646                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2647                     // this will continue to work as long as we don't start
2648                     // using more niches than just null (e.g., the first page of
2649                     // the address space, or unaligned pointers).
2650                     Variants::Multiple {
2651                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2652                         tag_field,
2653                         ..
2654                     } if this.fields.offset(tag_field) == offset => {
2655                         Some(this.for_variant(cx, dataful_variant))
2656                     }
2657                     _ => Some(this),
2658                 };
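
                // Example (illustrative only; `cx` and `opt_ref` are assumed
                // to exist): for `Option<&u32>` the niche is the null pointer,
                // so at offset 0 we descend into the dataful `Some` variant
                // and report its `&u32` pointee:
                //
                //     let layout = cx.layout_of(opt_ref)?;
                //     let info = layout.pointee_info_at(&cx, Size::ZERO);
                //     assert!(info.is_some());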
2659
2660                 if let Some(variant) = data_variant {
2661                     // We're not interested in any unions.
2662                     if let FieldsShape::Union(_) = variant.fields {
2663                         data_variant = None;
2664                     }
2665                 }
2666
2667                 let mut result = None;
2668
2669                 if let Some(variant) = data_variant {
2670                     let ptr_end = offset + Pointer.size(cx);
2671                     for i in 0..variant.fields.count() {
2672                         let field_start = variant.fields.offset(i);
2673                         if field_start <= offset {
2674                             let field = variant.field(cx, i);
2675                             result = field.to_result().ok().and_then(|field| {
2676                                 if ptr_end <= field_start + field.size {
2677                                     // We found the right field, look inside it.
2678                                     let field_info =
2679                                         field.pointee_info_at(cx, offset - field_start);
2680                                     field_info
2681                                 } else {
2682                                     None
2683                                 }
2684                             });
2685                             if result.is_some() {
2686                                 break;
2687                             }
2688                         }
2689                     }
2690                 }
2691
2692                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2693                 if let Some(ref mut pointee) = result {
2694                     if let ty::Adt(def, _) = this.ty.kind() {
2695                         if def.is_box() && offset.bytes() == 0 {
2696                             pointee.safe = Some(PointerKind::UniqueOwned);
2697                         }
2698                     }
2699                 }
2700
2701                 result
2702             }
2703         };
2704
2705         debug!(
2706             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2707             offset,
2708             this.ty.kind(),
2709             pointee_info
2710         );
2711
2712         pointee_info
2713     }
2714
2715     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2716         matches!(this.ty.kind(), ty::Adt(..))
2717     }
2718
2719     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2720         this.ty.kind() == &ty::Never
2721     }
2722
2723     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2724         matches!(this.ty.kind(), ty::Tuple(..))
2725     }
2726
2727     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2728         matches!(this.ty.kind(), ty::Tuple(list) if list.is_empty())
2729     }
2730 }
2731
2732 impl<'tcx> ty::Instance<'tcx> {
2733     // NOTE(eddyb) this is private to avoid using it from outside of
2734     // `fn_abi_of_instance` - any other uses are either too high-level
2735     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2736     // or should go through `FnAbi` instead, to avoid losing any
2737     // adjustments `fn_abi_of_instance` might be performing.
2738     fn fn_sig_for_fn_abi(
2739         &self,
2740         tcx: TyCtxt<'tcx>,
2741         param_env: ty::ParamEnv<'tcx>,
2742     ) -> ty::PolyFnSig<'tcx> {
2743         let ty = self.ty(tcx, param_env);
2744         match *ty.kind() {
2745             ty::FnDef(..) => {
2746                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2747                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2748                 // (i.e. due to being inside a projection that got normalized, see
2749                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2750                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2751                 let mut sig = match *ty.kind() {
2752                     ty::FnDef(def_id, substs) => tcx
2753                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2754                         .subst(tcx, substs),
2755                     _ => unreachable!(),
2756                 };
2757
2758                 if let ty::InstanceDef::VtableShim(..) = self.def {
2759                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2760                     sig = sig.map_bound(|mut sig| {
2761                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2762                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2763                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2764                         sig
2765                     });
2766                 }
2767                 sig
2768             }
2769             ty::Closure(def_id, substs) => {
2770                 let sig = substs.as_closure().sig();
2771
2772                 let bound_vars = tcx.mk_bound_variable_kinds(
2773                     sig.bound_vars()
2774                         .iter()
2775                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2776                 );
2777                 let br = ty::BoundRegion {
2778                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2779                     kind: ty::BoundRegionKind::BrEnv,
2780                 };
2781                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2782                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2783
2784                 let sig = sig.skip_binder();
2785                 ty::Binder::bind_with_vars(
2786                     tcx.mk_fn_sig(
2787                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2788                         sig.output(),
2789                         sig.c_variadic,
2790                         sig.unsafety,
2791                         sig.abi,
2792                     ),
2793                     bound_vars,
2794                 )
2795             }
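
            // (Example for the closure case above, illustrative.) An `Fn`
            // closure `|x: u32| -> u32 { x }` keeps its tupled
            // `extern "rust-call"` signature, but gains the environment as a
            // leading parameter:
            // `extern "rust-call" fn(&'env [closure], (u32,)) -> u32`,
            // where `'env` is the late-bound `BrEnv` region introduced above.
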
2796             ty::Generator(_, substs, _) => {
2797                 let sig = substs.as_generator().poly_sig();
2798
2799                 let bound_vars = tcx.mk_bound_variable_kinds(
2800                     sig.bound_vars()
2801                         .iter()
2802                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2803                 );
2804                 let br = ty::BoundRegion {
2805                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2806                     kind: ty::BoundRegionKind::BrEnv,
2807                 };
2808                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2809                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2810
2811                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2812                 let pin_adt_ref = tcx.adt_def(pin_did);
2813                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2814                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2815
2816                 let sig = sig.skip_binder();
2817                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2818                 let state_adt_ref = tcx.adt_def(state_did);
2819                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2820                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2821                 ty::Binder::bind_with_vars(
2822                     tcx.mk_fn_sig(
2823                         [env_ty, sig.resume_ty].iter(),
2824                         &ret_ty,
2825                         false,
2826                         hir::Unsafety::Normal,
2827                         rustc_target::spec::abi::Abi::Rust,
2828                     ),
2829                     bound_vars,
2830                 )
2831             }
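
            // (Example for the generator case above, illustrative.) A
            // generator yielding `u32` and returning `String` ends up with a
            // resume signature shaped like
            // `fn(Pin<&'env mut [generator]>, Resume) -> GeneratorState<u32, String>`,
            // where `Resume` is the generator's resume type.
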
2832             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2833         }
2834     }
2835 }
2836
2837 /// Calculates whether a function's ABI can unwind or not.
2838 ///
2839 /// This takes two primary parameters:
2840 ///
2841 /// * `fn_def_id` - the `DefId` of the function, if one is available, used to
2842 ///   look up the codegen attrs of a defined function. For function pointers
2843 ///   this is `None`. This is only applicable for Rust-defined
2844 ///   functions, and generally isn't needed except for small optimizations where
2845 ///   we try to say a function which otherwise might look like it could unwind
2846 ///   doesn't actually unwind (such as intrinsics).
2847 ///
2848 /// * `abi` - this is the ABI that the function is defined with. This is the
2849 ///   primary factor for determining whether a function can unwind or not.
2850 ///
2851 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2852 /// panics are implemented with unwinds on most platforms (when
2853 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2854 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2855 /// specifically in the name (e.g. `"C-unwind"`). What unwinding means is
2856 /// defined for each ABI individually, but it always corresponds to some form of
2857 /// stack-based unwinding (the exact mechanism of which varies
2858 /// platform-by-platform).
2859 ///
2860 /// Rust functions are classified as to whether or not they can unwind based on
2861 /// the active "panic strategy". In other words Rust functions are considered to
2862 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2863 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2864 /// only if the final panic mode is panic=abort. In this scenario any code
2865 /// previously compiled assuming that a function can unwind is still correct; it
2866 /// just never happens to actually unwind at runtime.
2867 ///
2868 /// This function's answer to whether or not a function can unwind is quite
2869 /// impactful throughout the compiler. This affects things like:
2870 ///
2871 /// * Calling a function which can't unwind means codegen simply ignores any
2872 ///   associated unwinding cleanup.
2873 /// * Calling a function which can unwind from a function which can't unwind
2874 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2875 ///   aborts the process.
2876 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2877 ///   affects various optimizations and codegen.
2878 ///
2879 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2880 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2881 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2882 /// might (from a foreign exception or similar).
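///
/// # Example
///
/// A sketch (illustrative only; `tcx` is assumed) of the rules above, for a
/// build with `-Cpanic=unwind` and without `#![feature(c_unwind)]`:
///
/// ```ignore (illustrative)
/// assert!(fn_can_unwind(tcx, None, SpecAbi::C { unwind: true }));
/// assert!(fn_can_unwind(tcx, None, SpecAbi::C { unwind: false })); // historical caveat
/// assert!(fn_can_unwind(tcx, None, SpecAbi::Rust));
/// assert!(!fn_can_unwind(tcx, None, SpecAbi::RustIntrinsic));
/// ```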
2883 #[inline]
2884 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2885     if let Some(did) = fn_def_id {
2886         // Special attribute for functions which can't unwind.
2887         if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2888             return false;
2889         }
2890
2891         // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2892         //
2893         // This is not part of `codegen_fn_attrs` as it can differ between crates
2894         // and therefore cannot be computed in core.
2895         if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
2896             if Some(did) == tcx.lang_items().drop_in_place_fn() {
2897                 return false;
2898             }
2899         }
2900     }
2901
2902     // Otherwise, if this isn't special, then unwinding is generally determined by
2903     // the ABI of the function itself. ABIs like `C` have variants which also
2904     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2905     // ABIs have such an option. Otherwise the only other thing here is Rust
2906     // itself, and those ABIs are determined by the panic strategy configured
2907     // for this compilation.
2908     //
2909     // Unfortunately at this time there's also another caveat. Rust [RFC
2910     // 2945][rfc] has been accepted and is in the process of being implemented
2911     // and stabilized. In this interim state we need to deal with historical
2912     // rustc behavior as well as plan for future rustc behavior.
2913     //
2914     // Historically functions declared with `extern "C"` were marked at the
2915     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2916     // being enabled or not. This is UB for functions in `panic=unwind` mode
2917     // that then actually panic and unwind. Note that this behavior is true for
2918     // both externally declared functions as well as Rust-defined functions.
2919     //
2920     // To fix this UB rustc would like to change in the future to catch unwinds
2921     // from function calls that may unwind within a Rust-defined `extern "C"`
2922     // function and forcibly abort the process, thereby respecting the
2923     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2924     // ready to roll out, so determining whether or not the `C` family of ABIs
2925     // unwinds is conditional not only on their definition but also whether the
2926     // `#![feature(c_unwind)]` feature gate is active.
2927     //
2928     // Note that this means that, unlike historical compilers, rustc now, by
2929     // default, unconditionally thinks that the `C` ABI may unwind. This will
2930     // prevent some optimization opportunities, however, so we try to scope this
2931     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2932     // to `panic=abort`).
2933     //
2934     // Eventually the check against `c_unwind` here will ideally get removed and
2935     // this'll be a little cleaner as it'll be a straightforward check of the
2936     // ABI.
2937     //
2938     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2939     use SpecAbi::*;
2940     match abi {
2941         C { unwind }
2942         | System { unwind }
2943         | Cdecl { unwind }
2944         | Stdcall { unwind }
2945         | Fastcall { unwind }
2946         | Vectorcall { unwind }
2947         | Thiscall { unwind }
2948         | Aapcs { unwind }
2949         | Win64 { unwind }
2950         | SysV64 { unwind } => {
2951             unwind
2952                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2953         }
2954         PtxKernel
2955         | Msp430Interrupt
2956         | X86Interrupt
2957         | AmdGpuKernel
2958         | EfiApi
2959         | AvrInterrupt
2960         | AvrNonBlockingInterrupt
2961         | CCmseNonSecureCall
2962         | Wasm
2963         | RustIntrinsic
2964         | PlatformIntrinsic
2965         | Unadjusted => false,
2966         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2967     }
2968 }
2969
2970 #[inline]
2971 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2972     use rustc_target::spec::abi::Abi::*;
2973     match tcx.sess.target.adjust_abi(abi) {
2974         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2975
2976         // It's the ABI's job to select this, not ours.
2977         System { .. } => bug!("system abi should be selected elsewhere"),
2978         EfiApi => bug!("eficall abi should be selected elsewhere"),
2979
2980         Stdcall { .. } => Conv::X86Stdcall,
2981         Fastcall { .. } => Conv::X86Fastcall,
2982         Vectorcall { .. } => Conv::X86VectorCall,
2983         Thiscall { .. } => Conv::X86ThisCall,
2984         C { .. } => Conv::C,
2985         Unadjusted => Conv::C,
2986         Win64 { .. } => Conv::X86_64Win64,
2987         SysV64 { .. } => Conv::X86_64SysV,
2988         Aapcs { .. } => Conv::ArmAapcs,
2989         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2990         PtxKernel => Conv::PtxKernel,
2991         Msp430Interrupt => Conv::Msp430Intr,
2992         X86Interrupt => Conv::X86Intr,
2993         AmdGpuKernel => Conv::AmdGpuKernel,
2994         AvrInterrupt => Conv::AvrInterrupt,
2995         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2996         Wasm => Conv::C,
2997
2998         // These ABI constants ought to be more specific...
2999         Cdecl { .. } => Conv::C,
3000     }
3001 }
3002
3003 /// Error produced by attempting to compute or adjust a `FnAbi`.
3004 #[derive(Copy, Clone, Debug, HashStable)]
3005 pub enum FnAbiError<'tcx> {
3006     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3007     Layout(LayoutError<'tcx>),
3008
3009     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3010     AdjustForForeignAbi(call::AdjustForForeignAbiError),
3011 }
3012
3013 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3014     fn from(err: LayoutError<'tcx>) -> Self {
3015         Self::Layout(err)
3016     }
3017 }
3018
3019 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3020     fn from(err: call::AdjustForForeignAbiError) -> Self {
3021         Self::AdjustForForeignAbi(err)
3022     }
3023 }
3024
3025 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3026     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3027         match self {
3028             Self::Layout(err) => err.fmt(f),
3029             Self::AdjustForForeignAbi(err) => err.fmt(f),
3030         }
3031     }
3032 }
3033
3034 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3035 // just for error handling.
3036 #[derive(Debug)]
3037 pub enum FnAbiRequest<'tcx> {
3038     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3039     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3040 }
3041
3042 /// Trait for contexts that want to be able to compute `FnAbi`s.
3043 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3044 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3045     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3046     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3047     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3048
3049     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3050     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3051     ///
3052     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3053     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3054     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3055     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3056     fn handle_fn_abi_err(
3057         &self,
3058         err: FnAbiError<'tcx>,
3059         span: Span,
3060         fn_abi_request: FnAbiRequest<'tcx>,
3061     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3062 }
3063
3064 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3065 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3066     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3067     ///
3068     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3069     /// instead, where the instance is an `InstanceDef::Virtual`.
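    ///
    /// # Example
    ///
    /// A sketch (illustrative only; `cx` and a `PolyFnSig` named `sig` are
    /// assumed) of computing the ABI of a call through a function pointer,
    /// with no caller-supplied variadic arguments:
    ///
    /// ```ignore (illustrative)
    /// let fn_abi = cx.fn_abi_of_fn_ptr(sig, ty::List::empty())?;
    /// assert_eq!(fn_abi.args.len(), fn_abi.fixed_count);
    /// ```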
3070     #[inline]
3071     fn fn_abi_of_fn_ptr(
3072         &self,
3073         sig: ty::PolyFnSig<'tcx>,
3074         extra_args: &'tcx ty::List<Ty<'tcx>>,
3075     ) -> Self::FnAbiOfResult {
3076         // FIXME(eddyb) get a better `span` here.
3077         let span = self.layout_tcx_at_span();
3078         let tcx = self.tcx().at(span);
3079
3080         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3081             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3082         ))
3083     }
3084
3085     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3086     /// direct calls to an `fn`.
3087     ///
3088     /// NB: that includes virtual calls, which are represented by "direct calls"
3089     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3090     #[inline]
3091     fn fn_abi_of_instance(
3092         &self,
3093         instance: ty::Instance<'tcx>,
3094         extra_args: &'tcx ty::List<Ty<'tcx>>,
3095     ) -> Self::FnAbiOfResult {
3096         // FIXME(eddyb) get a better `span` here.
3097         let span = self.layout_tcx_at_span();
3098         let tcx = self.tcx().at(span);
3099
3100         MaybeResult::from(
3101             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3102                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3103                 // we can get some kind of span even if one wasn't provided.
3104                 // However, we don't do this early in order to avoid calling
3105                 // `def_span` unconditionally (which may have a perf penalty).
3106                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3107                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3108             }),
3109         )
3110     }
3111 }
3112
3113 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
3114
3115 fn fn_abi_of_fn_ptr<'tcx>(
3116     tcx: TyCtxt<'tcx>,
3117     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3118 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3119     let (param_env, (sig, extra_args)) = query.into_parts();
3120
3121     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3122 }
3123
3124 fn fn_abi_of_instance<'tcx>(
3125     tcx: TyCtxt<'tcx>,
3126     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3127 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3128     let (param_env, (instance, extra_args)) = query.into_parts();
3129
3130     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3131
3132     let caller_location = if instance.def.requires_caller_location(tcx) {
3133         Some(tcx.caller_location_ty())
3134     } else {
3135         None
3136     };
3137
3138     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3139         sig,
3140         extra_args,
3141         caller_location,
3142         Some(instance.def_id()),
3143         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3144     )
3145 }
3146
3147 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3148     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3149     // arguments of this method, into a separate `struct`.
3150     fn fn_abi_new_uncached(
3151         &self,
3152         sig: ty::PolyFnSig<'tcx>,
3153         extra_args: &[Ty<'tcx>],
3154         caller_location: Option<Ty<'tcx>>,
3155         fn_def_id: Option<DefId>,
3156         // FIXME(eddyb) replace this with something typed, like an `enum`.
3157         force_thin_self_ptr: bool,
3158     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3159         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3160
3161         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3162
3163         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3164
3165         let mut inputs = sig.inputs();
3166         let extra_args = if sig.abi == RustCall {
3167             assert!(!sig.c_variadic && extra_args.is_empty());
3168
3169             if let Some(input) = sig.inputs().last() {
3170                 if let ty::Tuple(tupled_arguments) = input.kind() {
3171                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3172                     tupled_arguments
3173                 } else {
3174                     bug!(
3175                         "argument to function with \"rust-call\" ABI \
3176                             is not a tuple"
3177                     );
3178                 }
3179             } else {
3180                 bug!(
3181                     "function with \"rust-call\" ABI \
3182                         has no tupled argument to untuple"
3183                 );
3184             }
3185         } else {
3186             assert!(sig.c_variadic || extra_args.is_empty());
3187             extra_args
3188         };
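
        // Example (illustrative): calling a closure through `FnMut` has a
        // signature like `extern "rust-call" fn(&mut Self, (u32, u32))`; the
        // untupling above flattens it so the `FnAbi` sees three arguments:
        //
        //     inputs     = [&mut Self]
        //     extra_args = [u32, u32]    // untupled from `(u32, u32)`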
3189
3190         let target = &self.tcx.sess.target;
3191         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3192         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3193         let linux_s390x_gnu_like =
3194             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3195         let linux_sparc64_gnu_like =
3196             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3197         let linux_powerpc_gnu_like =
3198             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3199         use SpecAbi::*;
3200         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3201
3202         // Handle safe Rust thin and fat pointers.
3203         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3204                                       scalar: Scalar,
3205                                       layout: TyAndLayout<'tcx>,
3206                                       offset: Size,
3207                                       is_return: bool| {
3208             // Booleans are always a noundef i1 that needs to be zero-extended.
3209             if scalar.is_bool() {
3210                 attrs.ext(ArgExtension::Zext);
3211                 attrs.set(ArgAttribute::NoUndef);
3212                 return;
3213             }
3214
3215             // Scalars which have invalid values cannot be undef.
3216             if !scalar.is_always_valid(self) {
3217                 attrs.set(ArgAttribute::NoUndef);
3218             }
3219
3220             // Only pointer types handled below.
3221             let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3222
3223             if !valid_range.contains(0) {
3224                 attrs.set(ArgAttribute::NonNull);
3225             }
3226
3227             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3228                 if let Some(kind) = pointee.safe {
3229                     attrs.pointee_align = Some(pointee.align);
3230
3231                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
3232                     // for the entire duration of the function as they can be deallocated
3233                     // at any time. Set their valid size to 0.
3234                     attrs.pointee_size = match kind {
3235                         PointerKind::UniqueOwned => Size::ZERO,
3236                         _ => pointee.size,
3237                     };
3238
3239                     // `Box`, `&T`, and `&mut T` cannot be undef.
3240                     // Note that this only applies to the value of the pointer itself;
3241                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3242                     attrs.set(ArgAttribute::NoUndef);
3243
3244                     // `Box` pointer parameters never alias because ownership is transferred
3245                     // `&mut` pointer parameters never alias other parameters,
3246                     // or mutable global data
3247                     //
3248                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3249                     // and can be marked as both `readonly` and `noalias`, as
3250                     // LLVM's definition of `noalias` is based solely on memory
3251                     // dependencies rather than pointer equality
3252                     //
3253                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3254                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3255                     // or not to actually emit the attribute. It can also be controlled with the
3256                     // `-Zmutable-noalias` debugging option.
3257                     let no_alias = match kind {
3258                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3259                         PointerKind::UniqueOwned => true,
3260                         PointerKind::Frozen => !is_return,
3261                     };
3262                     if no_alias {
3263                         attrs.set(ArgAttribute::NoAlias);
3264                     }
3265
3266                     if kind == PointerKind::Frozen && !is_return {
3267                         attrs.set(ArgAttribute::ReadOnly);
3268                     }
3269
3270                     if kind == PointerKind::UniqueBorrowed && !is_return {
3271                         attrs.set(ArgAttribute::NoAliasMutRef);
3272                     }
3273                 }
3274             }
3275         };
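
        // Example (illustrative): on a typical 64-bit target, a non-return
        // argument of type `&u32` (`PointerKind::Frozen`) ends up with
        // `NoUndef + NonNull + NoAlias + ReadOnly`, `pointee_size = 4` and
        // `pointee_align = 4`, while `&mut T` (for `T: Unpin`) gets
        // `NoAliasMutRef` instead, so the backend decides whether to emit
        // `noalias`.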
3276
3277         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3278             let is_return = arg_idx.is_none();
3279
3280             let layout = self.layout_of(ty)?;
3281             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3282                 // Don't pass the vtable, it's not an argument of the virtual fn.
3283                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3284                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3285                 make_thin_self_ptr(self, layout)
3286             } else {
3287                 layout
3288             };
3289
3290             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3291                 let mut attrs = ArgAttributes::new();
3292                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3293                 attrs
3294             });
3295
3296             if arg.layout.is_zst() {
3297                 // For some forsaken reason, x86_64-pc-windows-gnu
3298                 // doesn't ignore zero-sized struct arguments.
3299                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3300                 if is_return
3301                     || rust_abi
3302                     || (!win_x64_gnu
3303                         && !linux_s390x_gnu_like
3304                         && !linux_sparc64_gnu_like
3305                         && !linux_powerpc_gnu_like)
3306                 {
3307                     arg.mode = PassMode::Ignore;
3308                 }
3309             }
3310
3311             Ok(arg)
3312         };
3313
3314         let mut fn_abi = FnAbi {
3315             ret: arg_of(sig.output(), None)?,
3316             args: inputs
3317                 .iter()
3318                 .copied()
3319                 .chain(extra_args.iter().copied())
3320                 .chain(caller_location)
3321                 .enumerate()
3322                 .map(|(i, ty)| arg_of(ty, Some(i)))
3323                 .collect::<Result<_, _>>()?,
3324             c_variadic: sig.c_variadic,
3325             fixed_count: inputs.len(),
3326             conv,
3327             can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3328         };
3329         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3330         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3331         Ok(self.tcx.arena.alloc(fn_abi))
3332     }
3333
3334     fn fn_abi_adjust_for_abi(
3335         &self,
3336         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3337         abi: SpecAbi,
3338     ) -> Result<(), FnAbiError<'tcx>> {
3339         if abi == SpecAbi::Unadjusted {
3340             return Ok(());
3341         }
3342
3343         if abi == SpecAbi::Rust
3344             || abi == SpecAbi::RustCall
3345             || abi == SpecAbi::RustIntrinsic
3346             || abi == SpecAbi::PlatformIntrinsic
3347         {
3348             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3349                 if arg.is_ignore() {
3350                     return;
3351                 }
3352
3353                 match arg.layout.abi {
3354                     Abi::Aggregate { .. } => {}
3355
3356                     // This is a fun case! The gist of what this is doing is
3357                     // that we want callers and callees to always agree on the
3358                     // ABI of how they pass SIMD arguments. If we were to *not*
3359                     // make these arguments indirect then they'd be immediates
3360                     // in LLVM, which means that they'd use whatever the
3361                     // appropriate ABI is for the callee and the caller. That
3362                     // means, for example, if the caller doesn't have AVX
3363                     // enabled but the callee does, then passing an AVX argument
3364                     // across this boundary would cause corrupt data to show up.
3365                     //
3366                     // This problem is fixed by unconditionally passing SIMD
3367                     // arguments through memory between callers and callees
3368                     // which should get them all to agree on ABI regardless of
3369                     // target feature sets. Some more information about this
3370                     // issue can be found in #44367.
3371                     //
3372                     // Note that the platform intrinsic ABI is exempt here as
3373                     // that's how we connect up to LLVM and it's unstable
3374                     // anyway, we control all calls to it in libstd.
3375                     Abi::Vector { .. }
3376                         if abi != SpecAbi::PlatformIntrinsic
3377                             && self.tcx.sess.target.simd_types_indirect =>
3378                     {
3379                         arg.make_indirect();
3380                         return;
3381                     }
3382
3383                     _ => return,
3384                 }
3385
3386                 let size = arg.layout.size;
3387                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3388                     arg.make_indirect();
3389                 } else {
3390                     // We want to pass small aggregates as immediates, but using
3391                     // a LLVM aggregate type for this leads to bad optimizations,
3392                     // so we pick an appropriately sized integer type instead.
3393                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3394                 }
3395             };
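
            // Example (illustrative): a by-value `#[repr(C)] struct Pair(u16, u16)`
            // (size 4, at or below pointer size) is cast to a 4-byte integer
            // register rather than passed as an LLVM aggregate, while a
            // 32-byte struct is made indirect.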
3396             fixup(&mut fn_abi.ret);
3397             for arg in &mut fn_abi.args {
3398                 fixup(arg);
3399             }
3400         } else {
3401             fn_abi.adjust_for_foreign_abi(self, abi)?;
3402         }
3403
3404         Ok(())
3405     }
3406 }
3407
3408 fn make_thin_self_ptr<'tcx>(
3409     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3410     layout: TyAndLayout<'tcx>,
3411 ) -> TyAndLayout<'tcx> {
3412     let tcx = cx.tcx();
3413     let fat_pointer_ty = if layout.is_unsized() {
3414         // unsized `self` is passed as a pointer to `self`
3415         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3416         tcx.mk_mut_ptr(layout.ty)
3417     } else {
3418         match layout.abi {
3419             Abi::ScalarPair(..) => (),
3420             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3421         }
3422
3423         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3424         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3425         // elsewhere in the compiler as a method on a `dyn Trait`.
3426         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3427         // get a built-in pointer type
3428         let mut fat_pointer_layout = layout;
3429         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3430             && !fat_pointer_layout.ty.is_region_ptr()
3431         {
3432             for i in 0..fat_pointer_layout.fields.count() {
3433                 let field_layout = fat_pointer_layout.field(cx, i);
3434
3435                 if !field_layout.is_zst() {
3436                     fat_pointer_layout = field_layout;
3437                     continue 'descend_newtypes;
3438                 }
3439             }
3440
3441             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3442         }
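
        // Example (illustrative): for a receiver of type `Rc<dyn Trait>` the
        // loop above descends `Rc<dyn Trait>` -> `NonNull<RcBox<dyn Trait>>`
        // -> `*const RcBox<dyn Trait>`, the built-in fat pointer whose layout
        // is then replaced with that of a thin `*mut ()` below.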
3443
3444         fat_pointer_layout.ty
3445     };
3446
3447     // we now have a type like `*mut RcBox<dyn Trait>`
3448     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3449     // this is understood as a special case elsewhere in the compiler
3450     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3451
3452     TyAndLayout {
3453         ty: fat_pointer_ty,
3454
3455         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3456         // should always work because the type is always `*mut ()`.
3457         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3458     }
3459 }