// compiler/rustc_middle/src/ty/layout.rs
1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable};
6 use rustc_ast as ast;
7 use rustc_attr as attr;
8 use rustc_hir as hir;
9 use rustc_hir::def_id::DefId;
10 use rustc_hir::lang_items::LangItem;
11 use rustc_index::bit_set::BitSet;
12 use rustc_index::vec::{Idx, IndexVec};
13 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::{Span, DUMMY_SP};
16 use rustc_target::abi::call::{
17     ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
18 };
19 use rustc_target::abi::*;
20 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
21
22 use std::cmp;
23 use std::fmt;
24 use std::iter;
25 use std::num::NonZeroUsize;
26 use std::ops::Bound;
27
28 use rand::{seq::SliceRandom, SeedableRng};
29 use rand_xoshiro::Xoshiro128StarStar;
30
/// Registers this module's queries (`layout_of` and the two fn-ABI queries)
/// with the global query system; all other providers are preserved via
/// struct-update syntax.
pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}
35
/// Extension methods on `rustc_target::abi::Integer` that need access to
/// `rustc_middle` types (`Ty`, `TyCtxt`, `ReprOptions`, ...).
pub trait IntegerExt {
    /// Converts this integer width to the corresponding Rust integer type,
    /// signed or unsigned according to `signed`.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    /// Converts an `attr::IntType` (from a `#[repr(..)]` attribute) to an
    /// `Integer`, resolving `isize`/`usize` via the target data layout.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Converts a `ty::IntTy` to an `Integer` (`isize` resolved via the data layout).
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    /// Converts a `ty::UintTy` to an `Integer` (`usize` resolved via the data layout).
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    /// Finds the smallest `Integer` (and its signedness) able to represent the
    /// discriminant range `min..=max`, honoring any `#[repr]` hint on `ty`.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}
49
50 impl IntegerExt for Integer {
51     #[inline]
52     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
53         match (*self, signed) {
54             (I8, false) => tcx.types.u8,
55             (I16, false) => tcx.types.u16,
56             (I32, false) => tcx.types.u32,
57             (I64, false) => tcx.types.u64,
58             (I128, false) => tcx.types.u128,
59             (I8, true) => tcx.types.i8,
60             (I16, true) => tcx.types.i16,
61             (I32, true) => tcx.types.i32,
62             (I64, true) => tcx.types.i64,
63             (I128, true) => tcx.types.i128,
64         }
65     }
66
67     /// Gets the Integer type from an attr::IntType.
68     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
69         let dl = cx.data_layout();
70
71         match ity {
72             attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
73             attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
74             attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
75             attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
76             attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
77             attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
78                 dl.ptr_sized_integer()
79             }
80         }
81     }
82
83     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
84         match ity {
85             ty::IntTy::I8 => I8,
86             ty::IntTy::I16 => I16,
87             ty::IntTy::I32 => I32,
88             ty::IntTy::I64 => I64,
89             ty::IntTy::I128 => I128,
90             ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
91         }
92     }
93     fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
94         match ity {
95             ty::UintTy::U8 => I8,
96             ty::UintTy::U16 => I16,
97             ty::UintTy::U32 => I32,
98             ty::UintTy::U64 => I64,
99             ty::UintTy::U128 => I128,
100             ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
101         }
102     }
103
104     /// Finds the appropriate Integer type and signedness for the given
105     /// signed discriminant range and `#[repr]` attribute.
106     /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
107     /// that shouldn't affect anything, other than maybe debuginfo.
108     fn repr_discr<'tcx>(
109         tcx: TyCtxt<'tcx>,
110         ty: Ty<'tcx>,
111         repr: &ReprOptions,
112         min: i128,
113         max: i128,
114     ) -> (Integer, bool) {
115         // Theoretically, negative values could be larger in unsigned representation
116         // than the unsigned representation of the signed minimum. However, if there
117         // are any negative values, the only valid unsigned representation is u128
118         // which can fit all i128 values, so the result remains unaffected.
119         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
120         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
121
122         if let Some(ity) = repr.int {
123             let discr = Integer::from_attr(&tcx, ity);
124             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
125             if discr < fit {
126                 bug!(
127                     "Integer::repr_discr: `#[repr]` hint too small for \
128                       discriminant range of enum `{}",
129                     ty
130                 )
131             }
132             return (discr, ity.is_signed());
133         }
134
135         let at_least = if repr.c() {
136             // This is usually I32, however it can be different on some platforms,
137             // notably hexagon and arm-none/thumb-none
138             tcx.data_layout().c_enum_min_size
139         } else {
140             // repr(Rust) enums try to be as small as possible
141             I8
142         };
143
144         // If there are no negative values, we can use the unsigned fit.
145         if min >= 0 {
146             (cmp::max(unsigned_fit, at_least), false)
147         } else {
148             (cmp::max(signed_fit, at_least), true)
149         }
150     }
151 }
152
/// Extension methods on `rustc_target::abi::Primitive` that need a `TyCtxt`
/// to produce `Ty` values.
pub trait PrimitiveExt {
    /// Returns the Rust type corresponding to this primitive.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// Returns an *integer* type of this primitive's size; pointers map to
    /// `usize` and floats are a bug (used for enum discriminants).
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}
157
158 impl PrimitiveExt for Primitive {
159     #[inline]
160     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
161         match *self {
162             Int(i, signed) => i.to_ty(tcx, signed),
163             F32 => tcx.types.f32,
164             F64 => tcx.types.f64,
165             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
166         }
167     }
168
169     /// Return an *integer* type matching this primitive.
170     /// Useful in particular when dealing with enum discriminants.
171     #[inline]
172     fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
173         match *self {
174             Int(i, signed) => i.to_ty(tcx, signed),
175             Pointer => tcx.types.usize,
176             F32 | F64 => bug!("floats do not have an int type"),
177         }
178     }
179 }
180
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
///
/// With a 4-bit log2, the largest representable lane count is `1 << 15` = 32768.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
199
/// Reasons why computing the layout of a type can fail.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of this type could not be determined.
    Unknown(Ty<'tcx>),
    /// The type's computed size overflows what the target can represent.
    SizeOverflow(Ty<'tcx>),
    /// The type (or a type it mentions) failed to normalize, so no layout
    /// can be computed; carries the normalization error for diagnostics.
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}
206
207 impl<'tcx> fmt::Display for LayoutError<'tcx> {
208     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
209         match *self {
210             LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
211             LayoutError::SizeOverflow(ty) => {
212                 write!(f, "values of the type `{}` are too big for the current architecture", ty)
213             }
214             LayoutError::NormalizationFailure(t, e) => write!(
215                 f,
216                 "unable to determine layout for `{}` because `{}` cannot be normalized",
217                 t,
218                 e.get_type_for_failure()
219             ),
220         }
221     }
222 }
223
/// Enforce some basic invariants on layouts.
///
/// Always checks that type-level uninhabitedness implies an uninhabited ABI;
/// in debug builds, additionally cross-checks the `Abi` against the layout's
/// recorded size/alignment, for the top-level layout and every variant.
fn sanity_check_layout<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    layout: &TyAndLayout<'tcx>,
) {
    // Type-level uninhabitedness should always imply ABI uninhabitedness.
    if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
        assert!(layout.abi.is_uninhabited());
    }

    // The remaining checks are more expensive, so gate them on debug assertions.
    if cfg!(debug_assertions) {
        // Verifies that the size/align implied by `layout.abi()` agree with the
        // layout's own recorded size/align.
        fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
            match layout.abi() {
                Abi::Scalar(scalar) => {
                    // No padding in scalars.
                    assert_eq!(
                        layout.align().abi,
                        scalar.align(&tcx).abi,
                        "alignment mismatch between ABI and layout in {layout:#?}"
                    );
                    assert_eq!(
                        layout.size(),
                        scalar.size(&tcx),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );
                }
                Abi::Vector { count, element } => {
                    // No padding in vectors. Alignment can be strengthened, though.
                    assert!(
                        layout.align().abi >= element.align(&tcx).abi,
                        "alignment mismatch between ABI and layout in {layout:#?}"
                    );
                    let size = element.size(&tcx) * count;
                    assert_eq!(
                        layout.size(),
                        size.align_to(tcx.data_layout().vector_align(size).abi),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );
                }
                Abi::ScalarPair(scalar1, scalar2) => {
                    // Sanity-check scalar pairs. These are a bit more flexible and support
                    // padding, but we can at least ensure both fields actually fit into the layout
                    // and the alignment requirement has not been weakened.
                    let align1 = scalar1.align(&tcx).abi;
                    let align2 = scalar2.align(&tcx).abi;
                    assert!(
                        layout.align().abi >= cmp::max(align1, align2),
                        "alignment mismatch between ABI and layout in {layout:#?}",
                    );
                    // Second field starts at the first field's size rounded up
                    // to the second field's alignment.
                    let field2_offset = scalar1.size(&tcx).align_to(align2);
                    assert!(
                        layout.size() >= field2_offset + scalar2.size(&tcx),
                        "size mismatch between ABI and layout in {layout:#?}"
                    );
                }
                Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
            }
        }

        check_layout_abi(tcx, layout.layout);

        // For multi-variant layouts, also check each variant's layout.
        if let Variants::Multiple { variants, .. } = &layout.variants {
            for variant in variants {
                check_layout_abi(tcx, *variant);
                // No nested "multiple".
                assert!(matches!(variant.variants(), Variants::Single { .. }));
                // Skip empty variants.
                if variant.size() == Size::ZERO
                    || variant.fields().count() == 0
                    || variant.abi().is_uninhabited()
                {
                    // These are never actually accessed anyway, so we can skip them. (Note that
                    // sometimes, variants with fields have size 0, and sometimes, variants without
                    // fields have non-0 size.)
                    continue;
                }
                // Variants should have the same or a smaller size as the full thing.
                if variant.size() > layout.size {
                    bug!(
                        "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
                        layout.size.bytes(),
                        variant.size().bytes(),
                    )
                }
                // The top-level ABI and the ABI of the variants should be coherent.
                let abi_coherent = match (layout.abi, variant.abi()) {
                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
                    (Abi::Uninhabited, _) => true,
                    (Abi::Aggregate { .. }, _) => true,
                    _ => false,
                };
                if !abi_coherent {
                    bug!(
                        "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
                        variant
                    );
                }
            }
        }
    }
}
327
/// Query provider: computes (and sanity-checks) the layout of a type under a
/// given `ParamEnv`.
///
/// Recursion depth is tracked through the TLS `ImplicitCtxt` (`layout_depth`)
/// and checked against the recursion limit to abort on infinitely-sized types.
/// The type is normalized first; if normalization changes it, the query is
/// re-entered with the normalized type so both keys share a cached result.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        // Abort (fatal, not LayoutError) if layout recursion goes too deep,
        // e.g. for directly self-referential types.
        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            // Record layout statistics (e.g. for `-Zprint-type-sizes`).
            cx.record_layout_for_printing(layout);

            sanity_check_layout(tcx, param_env, &layout);

            Ok(layout)
        })
    })
}
377
/// Context for layout computations: a context value `C` (usually
/// `TyCtxt<'tcx>`) paired with the `ParamEnv` to evaluate layouts in.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
382
/// How a struct-like layout may be laid out, which constrains field reordering
/// (see `univariant_uninterned`).
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
392
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; map.len()];
    for (src, &dst) in map.iter().enumerate() {
        inverse[dst as usize] = src as u32;
    }
    inverse
}
405
406 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Builds the layout of an (a, b) scalar pair: `a` at offset 0, `b` at
    /// `a`'s size rounded up to `b`'s alignment, with overall size/align
    /// computed from both scalars and the target's aggregate alignment.
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
433
    /// Computes the layout of a single-variant aggregate (struct, tuple,
    /// closure, or one enum variant) from its field layouts and `#[repr]`
    /// options, optionally reordering fields for a smaller layout.
    ///
    /// Returns `LayoutError::Unknown` for conflicting packed+aligned reprs and
    /// `LayoutError::SizeOverflow` if the offsets overflow the target's
    /// object-size bound.
    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        // `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive;
        // this should have been rejected earlier, hence delay_span_bug.
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        // Start with the identity ordering (memory order == source order).
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            // A possibly-unsized last field must stay last, so exclude it
            // from reordering.
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            // Effective alignment of a field, capped by `#[repr(packed(N))]`.
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        // Reserve room for the prefix (e.g. an enum tag) before any fields.
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        // Lay out fields in memory order, accumulating offsets, alignment,
        // and the largest niche seen so far.
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            // An unsized field must be last in memory order; anything after
            // it indicates an earlier error.
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            // Track the largest niche, translating its offset into this
            // aggregate's frame of reference (unless niches are hidden).
            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        // `#[repr(align(N))]` can only raise the alignment.
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        // Any uninhabited field makes the whole aggregate uninhabited.
        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
655
656     fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
657         let tcx = self.tcx;
658         let param_env = self.param_env;
659         let dl = self.data_layout();
660         let scalar_unit = |value: Primitive| {
661             let size = value.size(dl);
662             assert!(size.bits() <= 128);
663             Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
664         };
665         let scalar =
666             |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
667
668         let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
669             Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
670         };
671         debug_assert!(!ty.has_infer_types_or_consts());
672
673         Ok(match *ty.kind() {
674             // Basic scalars.
675             ty::Bool => tcx.intern_layout(LayoutS::scalar(
676                 self,
677                 Scalar::Initialized {
678                     value: Int(I8, false),
679                     valid_range: WrappingRange { start: 0, end: 1 },
680                 },
681             )),
682             ty::Char => tcx.intern_layout(LayoutS::scalar(
683                 self,
684                 Scalar::Initialized {
685                     value: Int(I32, false),
686                     valid_range: WrappingRange { start: 0, end: 0x10FFFF },
687                 },
688             )),
689             ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
690             ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
691             ty::Float(fty) => scalar(match fty {
692                 ty::FloatTy::F32 => F32,
693                 ty::FloatTy::F64 => F64,
694             }),
695             ty::FnPtr(_) => {
696                 let mut ptr = scalar_unit(Pointer);
697                 ptr.valid_range_mut().start = 1;
698                 tcx.intern_layout(LayoutS::scalar(self, ptr))
699             }
700
701             // The never type.
702             ty::Never => tcx.intern_layout(LayoutS {
703                 variants: Variants::Single { index: VariantIdx::new(0) },
704                 fields: FieldsShape::Primitive,
705                 abi: Abi::Uninhabited,
706                 largest_niche: None,
707                 align: dl.i8_align,
708                 size: Size::ZERO,
709             }),
710
            // Potentially-wide pointers.
            // References and raw pointers share one arm; the difference is only
            // in the data pointer's validity range (references are non-null).
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    // `&T`/`&mut T` are non-null: start the valid range at 1.
                    data_ptr.valid_range_mut().start = 1;
                }

                // Normalize first so associated-type pointees resolve before
                // the sizedness check below.
                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    // Sized pointee => thin pointer, a single scalar.
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                // Unsized pointee: the metadata half depends on the "tail" of
                // the pointee's struct chain.
                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        // `extern type` tails carry no metadata: thin pointer.
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    // Slices and `str` carry a pointer-sized length.
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        // Trait objects carry a non-null vtable pointer.
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    // Any other unsized tail (e.g. a still-generic type) has
                    // no known layout here.
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
740
            // Arrays and slices.
            ty::Array(element, mut count) => {
                // The length may mention projections (e.g. `[T; <A as B>::N]`);
                // normalize once, and bail if projections remain.
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                // The length must evaluate to a concrete usize at this point.
                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                // Guard against `element_size * count` exceeding the target's
                // object-size limits.
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                // A non-empty array of an (conservatively) uninhabited element
                // type is itself uninhabited; empty arrays are always habitable.
                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                // An empty array contains no element bytes, so it contributes
                // no niche either.
                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            // `[T]` is unsized: recorded with count 0 and size 0 here, since
            // the real length lives in the fat pointer's metadata.
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            // `str` is laid out like `[u8]`: byte stride, unsized, byte-aligned.
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),
792
            // Odd unit types.
            // `FnDef` is a zero-sized "function item" type: laid out as an
            // empty univariant struct.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            // Bare `dyn Trait` and `extern type`s: start from an empty unit
            // layout, then patch it to be unsized (these types only ever appear
            // behind a pointer).
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    // univariant of no fields is always an aggregate.
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            // Generators have bespoke multi-variant layout; delegated entirely.
            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            // A closure is laid out as a struct of its captured upvars.
            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                // Non-empty tuples use `MaybeUnsized` — presumably because the
                // last element's sizedness may not be known here; `()` is
                // trivially always sized.
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }
830
            // SIMD vector types.
            // Validates the shape of a `#[repr(simd)]` ADT and produces an
            // `Abi::Vector` layout. Shape errors that typeck should have caught
            // become fatal errors here because they can only surface after
            // monomorphization (e.g. with generic element types or lengths).
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogenous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
                // support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                // Vector alignment comes from the target data layout and the
                // total size is rounded up to it.
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                // The array form has exactly one field (the array) at offset 0;
                // the multi-field form is an implicit array of `e_len` lanes.
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }
944
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                // One `Vec<TyAndLayout>` per variant; any field whose layout
                // fails propagates the error for the whole ADT.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Unions: every field sits at offset 0; size is the max field
                // size, alignment the max field alignment (subject to repr).
                if def.is_union() {
                    // `#[repr(packed)]` and `#[repr(align)]` are mutually
                    // exclusive; typeck should already have rejected this.
                    // NOTE(review): uses `self.tcx.sess` here vs `tcx.sess`
                    // elsewhere — behaviorally identical.
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    // packed unions start from byte alignment; otherwise the
                    // target's default aggregate alignment.
                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    // `#[repr(align(N))]` can only raise the alignment.
                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    // ABI forwarding: if all non-ZST fields share one ABI, the
                    // union can use it (with validity discarded, see below).
                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    // Unions have a single (synthetic) variant holding all fields.
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            // (`to_union` widens each scalar to its full range,
                            // since union fields may be uninitialized).
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            // `size == 0` here means no non-ZST field has been
                            // seen yet (size only grows below).
                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    // `#[repr(packed(N))]` caps the computed alignment.
                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            // A fieldless union has no layout we can compute.
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
1027
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                // First two present variants; `present_second` distinguishes
                // true multi-variant enums from univariant ones below.
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    // Structs may be unsized via their last field; enums (and
                    // empty variants) are always sized.
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    // Apply `#[rustc_layout_scalar_valid_range_{start,end}]`
                    // (e.g. NonNull/NonZero internals) by tightening the
                    // scalar's validity range.
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        // Non-scalar ABI: the attribute must not be present at all.
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
1137
                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                // Try to encode the discriminant inside invalid values ("niche")
                // of one field of the single data-carrying variant (e.g. the
                // null pointer in `Option<&T>`), avoiding a separate tag.
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    // Deliberately-inverted empty range; widened below as
                    // all-ZST variants are encountered.
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    // The optimization only applies with exactly one variant
                    // carrying data; two or more cancel it (`dataful_variant`
                    // reset to None).
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        // All-ZST variant: it needs a niche value to encode it.
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    // Still-inverted range means no dataless variants exist,
                    // so there is nothing to encode in a niche.
                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        // Number of niche values needed: one per dataless variant.
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        // `reserve` returns None if the niche can't hold `count`
                        // values, in which case we fall through to tagged layout.
                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            // Lay out every variant as its own struct; the
                            // overall alignment is the max over variants.
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            // Absolute offset of the niche scalar within the enum:
                            // the niche field's offset in the dataful variant
                            // plus the niche's offset within that field.
                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // Only the niche is guaranteed to be initialised,
                                        // so use union layout for the other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, second.to_union())
                                        } else {
                                            Abi::ScalarPair(first.to_union(), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            // Candidate layout; compared against the tagged
                            // layout further down before being chosen.
                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
1262
                // Tagged-enum layout: compute the discriminant's value range
                // over inhabited variants to pick the smallest tag integer.
                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    // Variants with an uninhabited field can never be
                    // constructed, so their discriminants don't constrain the tag.
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                // (256 is beyond any integer alignment; the assert documents that.)
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and the
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr().c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                // Each variant is laid out with a tag-sized prefix reserved at
                // the front (`StructKind::Prefixed`).
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr(),
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make discr type however large (as an optimisation)
1362                 // after this point – we’ll just truncate the value we load in codegen.
1363                 }
1364
1365                 // Check to see if we should use a different type for the
1366                 // discriminant. We can safely use a type with the same size
1367                 // as the alignment of the first field of each variant.
1368                 // We increase the size of the discriminant to avoid LLVM copying
1369                 // padding when it doesn't need to. This normally causes unaligned
1370                 // load/stores and excessive memcpy/memset operations. By using a
1371                 // bigger integer size, LLVM can be sure about its contents and
1372                 // won't be so conservative.
1373
1374                 // Use the initial field alignment
1375                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1376                     min_ity
1377                 } else {
1378                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1379                 };
1380
1381                 // If the alignment is not larger than the chosen discriminant size,
1382                 // don't use the alignment as the final size.
1383                 if ity <= min_ity {
1384                     ity = min_ity;
1385                 } else {
1386                     // Patch up the variants' first few fields.
1387                     let old_ity_size = min_ity.size();
1388                     let new_ity_size = ity.size();
1389                     for variant in &mut layout_variants {
1390                         match variant.fields {
1391                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1392                                 for i in offsets {
1393                                     if *i <= old_ity_size {
1394                                         assert_eq!(*i, old_ity_size);
1395                                         *i = new_ity_size;
1396                                     }
1397                                 }
1398                                 // We might be making the struct larger.
1399                                 if variant.size <= old_ity_size {
1400                                     variant.size = new_ity_size;
1401                                 }
1402                             }
1403                             _ => bug!(),
1404                         }
1405                     }
1406                 }
1407
1408                 let tag_mask = ity.size().unsigned_int_max();
1409                 let tag = Scalar::Initialized {
1410                     value: Int(ity, signed),
1411                     valid_range: WrappingRange {
1412                         start: (min as u128 & tag_mask),
1413                         end: (max as u128 & tag_mask),
1414                     },
1415                 };
1416                 let mut abi = Abi::Aggregate { sized: true };
1417
1418                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1419                     abi = Abi::Uninhabited;
1420                 } else if tag.size(dl) == size {
1421                     // Make sure we only use scalar layout when the enum is entirely its
1422                     // own tag (i.e. it has no padding nor any non-ZST variant fields).
1423                     abi = Abi::Scalar(tag);
1424                 } else {
1425                     // Try to use a ScalarPair for all tagged enums.
1426                     let mut common_prim = None;
1427                     let mut common_prim_initialized_in_all_variants = true;
1428                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1429                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1430                             bug!();
1431                         };
1432                         let mut fields =
1433                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1434                         let (field, offset) = match (fields.next(), fields.next()) {
1435                             (None, None) => {
1436                                 common_prim_initialized_in_all_variants = false;
1437                                 continue;
1438                             }
1439                             (Some(pair), None) => pair,
1440                             _ => {
1441                                 common_prim = None;
1442                                 break;
1443                             }
1444                         };
1445                         let prim = match field.abi {
1446                             Abi::Scalar(scalar) => {
1447                                 common_prim_initialized_in_all_variants &=
1448                                     matches!(scalar, Scalar::Initialized { .. });
1449                                 scalar.primitive()
1450                             }
1451                             _ => {
1452                                 common_prim = None;
1453                                 break;
1454                             }
1455                         };
1456                         if let Some(pair) = common_prim {
1457                             // This is pretty conservative. We could go fancier
1458                             // by conflating things like i32 and u32, or even
1459                             // realising that (u8, u8) could just cohabit with
1460                             // u16 or even u32.
1461                             if pair != (prim, offset) {
1462                                 common_prim = None;
1463                                 break;
1464                             }
1465                         } else {
1466                             common_prim = Some((prim, offset));
1467                         }
1468                     }
1469                     if let Some((prim, offset)) = common_prim {
1470                         let prim_scalar = if common_prim_initialized_in_all_variants {
1471                             scalar_unit(prim)
1472                         } else {
1473                             // Common prim might be uninit.
1474                             Scalar::Union { value: prim }
1475                         };
1476                         let pair = self.scalar_pair(tag, prim_scalar);
1477                         let pair_offsets = match pair.fields {
1478                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1479                                 assert_eq!(memory_index, &[0, 1]);
1480                                 offsets
1481                             }
1482                             _ => bug!(),
1483                         };
1484                         if pair_offsets[0] == Size::ZERO
1485                             && pair_offsets[1] == *offset
1486                             && align == pair.align
1487                             && size == pair.size
1488                         {
1489                             // We can use `ScalarPair` only when it matches our
1490                             // already computed layout (including `#[repr(C)]`).
1491                             abi = pair.abi;
1492                         }
1493                     }
1494                 }
1495
1496                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1497                 // variants to ensure they are consistent. This is because a downcast is
1498                 // semantically a NOP, and thus should not affect layout.
1499                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1500                     for variant in &mut layout_variants {
1501                         // We only do this for variants with fields; the others are not accessed anyway.
1502                         // Also do not overwrite any already existing "clever" ABIs.
1503                         if variant.fields.count() > 0
1504                             && matches!(variant.abi, Abi::Aggregate { .. })
1505                         {
1506                             variant.abi = abi;
1507                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1508                             variant.size = cmp::max(variant.size, size);
1509                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1510                         }
1511                     }
1512                 }
1513
1514                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1515
1516                 let layout_variants =
1517                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1518
1519                 let tagged_layout = LayoutS {
1520                     variants: Variants::Multiple {
1521                         tag,
1522                         tag_encoding: TagEncoding::Direct,
1523                         tag_field: 0,
1524                         variants: layout_variants,
1525                     },
1526                     fields: FieldsShape::Arbitrary {
1527                         offsets: vec![Size::ZERO],
1528                         memory_index: vec![0],
1529                     },
1530                     largest_niche,
1531                     abi,
1532                     align,
1533                     size,
1534                 };
1535
1536                 let best_layout = match (tagged_layout, niche_filling_layout) {
1537                     (tagged_layout, Some(niche_filling_layout)) => {
1538                         // Pick the smaller layout; otherwise,
1539                         // pick the layout with the larger niche; otherwise,
1540                         // pick tagged as it has simpler codegen.
1541                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1542                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1543                             (layout.size, cmp::Reverse(niche_size))
1544                         })
1545                     }
1546                     (tagged_layout, None) => tagged_layout,
1547                 };
1548
1549                 tcx.intern_layout(best_layout)
1550             }
1551
1552             // Types with no meaningful known layout.
1553             ty::Projection(_) | ty::Opaque(..) => {
1554                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1555                 // if that was possible, so there's no reason to try again here.
1556                 return Err(LayoutError::Unknown(ty));
1557             }
1558
1559             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1560                 bug!("Layout::compute: unexpected type `{}`", ty)
1561             }
1562
1563             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1564                 return Err(LayoutError::Unknown(ty));
1565             }
1566         })
1567     }
1568 }
1569
1570 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1571 #[derive(Clone, Debug, PartialEq)]
1572 enum SavedLocalEligibility {
         /// Not yet observed in any variant; the initial state for every local.
1573     Unassigned,
         /// Observed in exactly one variant so far (the payload is that
         /// variant's index); such locals remain candidates for overlapping
         /// with eligible locals of other variants.
1574     Assigned(VariantIdx),
1575     // FIXME: Use newtype_index so we aren't wasting bytes
         /// Must be stored in the generator's prefix. `None` while the prefix
         /// order is still undecided; `Some(i)` once the local has been given
         /// slot `i` among the promoted prefix fields.
1576     Ineligible(Option<u32>),
1577 }
1578
1579 // When laying out generators, we divide our saved local fields into two
1580 // categories: overlap-eligible and overlap-ineligible.
1581 //
1582 // Those fields which are ineligible for overlap go in a "prefix" at the
1583 // beginning of the layout, and always have space reserved for them.
1584 //
1585 // Overlap-eligible fields are only assigned to one variant, so we lay
1586 // those fields out for each variant and put them right after the
1587 // prefix.
1588 //
1589 // Finally, in the layout details, we point to the fields from the
1590 // variants they are assigned to. It is possible for some fields to be
1591 // included in multiple variants. No field ever "moves around" in the
1592 // layout; its offset is always the same.
1593 //
1594 // Also included in the layout are the upvars and the discriminant.
1595 // These are included as fields on the "outer" layout; they are not part
1596 // of any variant.
1597 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1598     /// Compute the eligibility and assignment of each local.
         ///
         /// Returns the set of saved locals that are ineligible for overlap
         /// (these will be "promoted" into the generator's prefix), together
         /// with a per-local `SavedLocalEligibility` assignment: locals that
         /// stay eligible keep the single variant they were `Assigned` to,
         /// and every ineligible local ends up as `Ineligible(Some(_))` with
         /// its position among the promoted prefix fields.
1599     fn generator_saved_local_eligibility(
1600         &self,
1601         info: &GeneratorLayout<'tcx>,
1602     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1603         use SavedLocalEligibility::*;
1604
         // Every saved local starts out unassigned; the loops below refine this.
1605         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1606             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1607
1608         // The saved locals not eligible for overlap. These will get
1609         // "promoted" to the prefix of our generator.
1610         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1611
1612         // Figure out which of our saved locals are fields in only
1613         // one variant. The rest are deemed ineligible for overlap.
1614         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1615             for local in fields {
1616                 match assignments[*local] {
1617                     Unassigned => {
1618                         assignments[*local] = Assigned(variant_index);
1619                     }
1620                     Assigned(idx) => {
1621                         // We've already seen this local at another suspension
1622                         // point, so it is no longer a candidate.
1623                         trace!(
1624                             "removing local {:?} in >1 variant ({:?}, {:?})",
1625                             local,
1626                             variant_index,
1627                             idx
1628                         );
1629                         ineligible_locals.insert(*local);
1630                         assignments[*local] = Ineligible(None);
1631                     }
1632                     Ineligible(_) => {}
1633                 }
1634             }
1635         }
1636
1637         // Next, check every pair of eligible locals to see if they
1638         // conflict.
1639         for local_a in info.storage_conflicts.rows() {
             // Hoisted here because it feeds the greedy tie-break below.
1640             let conflicts_a = info.storage_conflicts.count(local_a);
             // Already-ineligible locals live in the prefix, so they never
             // constrain the overlap assignment of anyone else.
1641             if ineligible_locals.contains(local_a) {
1642                 continue;
1643             }
1644
1645             for local_b in info.storage_conflicts.iter(local_a) {
1646                 // local_a and local_b are storage live at the same time, therefore they
1647                 // cannot overlap in the generator layout. The only way to guarantee
1648                 // this is if they are in the same variant, or one is ineligible
1649                 // (which means it is stored in every variant).
1650                 if ineligible_locals.contains(local_b)
1651                     || assignments[local_a] == assignments[local_b]
1652                 {
1653                     continue;
1654                 }
1655
1656                 // If they conflict, we will choose one to make ineligible.
1657                 // This is not always optimal; it's just a greedy heuristic that
1658                 // seems to produce good results most of the time.
1659                 let conflicts_b = info.storage_conflicts.count(local_b);
                 // Demote whichever local participates in more conflicts, on
                 // the theory that it would have been demoted soon anyway.
1660                 let (remove, other) =
1661                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1662                 ineligible_locals.insert(remove);
1663                 assignments[remove] = Ineligible(None);
1664                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1665             }
1666         }
1667
1668         // Count the number of variants in use. If only one of them, then it is
1669         // impossible to overlap any locals in our layout. In this case it's
1670         // always better to make the remaining locals ineligible, so we can
1671         // lay them out with the other locals in the prefix and eliminate
1672         // unnecessary padding bytes.
1673         {
1674             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1675             for assignment in &assignments {
1676                 if let Assigned(idx) = assignment {
1677                     used_variants.insert(*idx);
1678                 }
1679             }
1680             if used_variants.count() < 2 {
1681                 for assignment in assignments.iter_mut() {
1682                     *assignment = Ineligible(None);
1683                 }
1684                 ineligible_locals.insert_all();
1685             }
1686         }
1687
1688         // Write down the order of our locals that will be promoted to the prefix.
1689         {
1690             for (idx, local) in ineligible_locals.iter().enumerate() {
1691                 assignments[local] = Ineligible(Some(idx as u32));
1692             }
1693         }
1694         debug!("generator saved local assignments: {:?}", assignments);
1695
1696         (ineligible_locals, assignments)
1697     }
1698
1699     /// Compute the full generator layout.
         ///
         /// The layout consists of a common prefix — the generator's
         /// `prefix_tys()` (upvars, per the "outer fields" comment below),
         /// the tag, and all overlap-ineligible "promoted" locals — followed
         /// by per-variant fields for the overlap-eligible locals.
         ///
         /// Returns `LayoutError::Unknown` if the generator's MIR layout is
         /// unavailable, and propagates any error from laying out field types.
1700     fn generator_layout(
1701         &self,
1702         ty: Ty<'tcx>,
1703         def_id: hir::def_id::DefId,
1704         substs: SubstsRef<'tcx>,
1705     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1706         use SavedLocalEligibility::*;
1707         let tcx = self.tcx;
         // Field types are stored generically in `info`; substitute the
         // generator's actual substs before laying them out.
1708         let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1709
1710         let Some(info) = tcx.generator_layout(def_id) else {
1711             return Err(LayoutError::Unknown(ty));
1712         };
1713         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1714
1715         // Build a prefix layout, including "promoting" all ineligible
1716         // locals as part of the prefix. We compute the layout of all of
1717         // these fields at once to get optimal packing.
         // The tag is laid out immediately after the `prefix_tys()` fields,
         // so its field index equals their count (see `tag_field` below).
1718         let tag_index = substs.as_generator().prefix_tys().count();
1719
1720         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1721         let max_discr = (info.variant_fields.len() - 1) as u128;
1722         let discr_int = Integer::fit_unsigned(max_discr);
1723         let discr_int_ty = discr_int.to_ty(tcx, false);
1724         let tag = Scalar::Initialized {
1725             value: Primitive::Int(discr_int, false),
1726             valid_range: WrappingRange { start: 0, end: max_discr },
1727         };
1728         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1729         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1730
         // Promoted locals are wrapped in `MaybeUninit` — NOTE(review):
         // presumably because a given variant need not initialize all of
         // them (they are shared across variants; see the comment above
         // this impl).
1731         let promoted_layouts = ineligible_locals
1732             .iter()
1733             .map(|local| subst_field(info.field_tys[local]))
1734             .map(|ty| tcx.mk_maybe_uninit(ty))
1735             .map(|ty| self.layout_of(ty));
         // Prefix field order: prefix_tys, then the tag, then promoted locals.
1736         let prefix_layouts = substs
1737             .as_generator()
1738             .prefix_tys()
1739             .map(|ty| self.layout_of(ty))
1740             .chain(iter::once(Ok(tag_layout)))
1741             .chain(promoted_layouts)
1742             .collect::<Result<Vec<_>, _>>()?;
1743         let prefix = self.univariant_uninterned(
1744             ty,
1745             &prefix_layouts,
1746             &ReprOptions::default(),
1747             StructKind::AlwaysSized,
1748         )?;
1749
1750         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1751
1752         // Split the prefix layout into the "outer" fields (upvars and
1753         // discriminant) and the "promoted" fields. Promoted fields will
1754         // get included in each variant that requested them in
1755         // GeneratorLayout.
1756         debug!("prefix = {:#?}", prefix);
1757         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1758             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1759                 let mut inverse_memory_index = invert_mapping(&memory_index);
1760
1761                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1762                 // "outer" and "promoted" fields respectively.
1763                 let b_start = (tag_index + 1) as u32;
1764                 let offsets_b = offsets.split_off(b_start as usize);
1765                 let offsets_a = offsets;
1766
1767                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1768                 // by preserving the order but keeping only one disjoint "half" each.
1769                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1770                 let inverse_memory_index_b: Vec<_> =
1771                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1772                 inverse_memory_index.retain(|&i| i < b_start);
1773                 let inverse_memory_index_a = inverse_memory_index;
1774
1775                 // Since `inverse_memory_index_{a,b}` each only refer to their
1776                 // respective fields, they can be safely inverted
1777                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1778                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1779
1780                 let outer_fields =
1781                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1782                 (outer_fields, offsets_b, memory_index_b)
1783             }
1784             _ => bug!(),
1785         };
1786
         // Start from the prefix's size/align; each variant below can only
         // grow them (`max`).
1787         let mut size = prefix.size;
1788         let mut align = prefix.align;
1789         let variants = info
1790             .variant_fields
1791             .iter_enumerated()
1792             .map(|(index, variant_fields)| {
1793                 // Only include overlap-eligible fields when we compute our variant layout.
1794                 let variant_only_tys = variant_fields
1795                     .iter()
1796                     .filter(|local| match assignments[**local] {
1797                         Unassigned => bug!(),
1798                         Assigned(v) if v == index => true,
1799                         Assigned(_) => bug!("assignment does not match variant"),
1800                         Ineligible(_) => false,
1801                     })
1802                     .map(|local| subst_field(info.field_tys[*local]));
1803
1804                 let mut variant = self.univariant_uninterned(
1805                     ty,
1806                     &variant_only_tys
1807                         .map(|ty| self.layout_of(ty))
1808                         .collect::<Result<Vec<_>, _>>()?,
1809                     &ReprOptions::default(),
                     // Reserve room for the shared prefix in every variant.
1810                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1811                 )?;
1812                 variant.variants = Variants::Single { index };
1813
1814                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1815                     bug!();
1816                 };
1817
1818                 // Now, stitch the promoted and variant-only fields back together in
1819                 // the order they are mentioned by our GeneratorLayout.
1820                 // Because we only use some subset (that can differ between variants)
1821                 // of the promoted fields, we can't just pick those elements of the
1822                 // `promoted_memory_index` (as we'd end up with gaps).
1823                 // So instead, we build an "inverse memory_index", as if all of the
1824                 // promoted fields were being used, but leave the elements not in the
1825                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1826                 // obtain a valid (bijective) mapping.
1827                 const INVALID_FIELD_IDX: u32 = !0;
1828                 let mut combined_inverse_memory_index =
1829                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1830                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1831                 let combined_offsets = variant_fields
1832                     .iter()
1833                     .enumerate()
1834                     .map(|(i, local)| {
1835                         let (offset, memory_index) = match assignments[*local] {
1836                             Unassigned => bug!(),
1837                             Assigned(_) => {
                                 // Variant-only field: consume the next entry
                                 // computed above, shifted past the promoted
                                 // fields' memory-index range.
1838                                 let (offset, memory_index) =
1839                                     offsets_and_memory_index.next().unwrap();
1840                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1841                             }
1842                             Ineligible(field_idx) => {
                                 // Promoted field: reuse its fixed slot in the
                                 // shared prefix.
1843                                 let field_idx = field_idx.unwrap() as usize;
1844                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1845                             }
1846                         };
1847                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1848                         offset
1849                     })
1850                     .collect();
1851
1852                 // Remove the unused slots and invert the mapping to obtain the
1853                 // combined `memory_index` (also see previous comment).
1854                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1855                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1856
1857                 variant.fields = FieldsShape::Arbitrary {
1858                     offsets: combined_offsets,
1859                     memory_index: combined_memory_index,
1860                 };
1861
1862                 size = size.max(variant.size);
1863                 align = align.max(variant.align);
1864                 Ok(tcx.intern_layout(variant))
1865             })
1866             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1867
1868         size = size.align_to(align.abi);
1869
1870         let abi =
1871             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1872                 Abi::Uninhabited
1873             } else {
1874                 Abi::Aggregate { sized: true }
1875             };
1876
1877         let layout = tcx.intern_layout(LayoutS {
1878             variants: Variants::Multiple {
1879                 tag,
1880                 tag_encoding: TagEncoding::Direct,
                 // The tag sits right after the `prefix_tys()` fields (see
                 // `prefix_layouts` above).
1881                 tag_field: tag_index,
1882                 variants,
1883             },
1884             fields: outer_fields,
1885             abi,
1886             largest_niche: prefix.largest_niche,
1887             size,
1888             align,
1889         });
1890         debug!("generator layout ({:?}): {:#?}", ty, layout);
1891         Ok(layout)
1892     }
1893
1894     /// This is invoked by the `layout_of` query to record the final
1895     /// layout of each type.
1896     #[inline(always)]
1897     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1898         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1899         // for dumping later.
1900         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1901             self.record_layout_for_printing_outlined(layout)
1902         }
1903     }
1904
1905     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1906         // Ignore layouts that are done with non-empty environments or
1907         // non-monomorphic layouts, as the user only wants to see the stuff
1908         // resulting from the final codegen session.
1909         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1910             return;
1911         }
1912
1913         // (delay format until we actually need it)
1914         let record = |kind, packed, opt_discr_size, variants| {
1915             let type_desc = format!("{:?}", layout.ty);
1916             self.tcx.sess.code_stats.record_type_size(
1917                 kind,
1918                 type_desc,
1919                 layout.align.abi,
1920                 layout.size,
1921                 packed,
1922                 opt_discr_size,
1923                 variants,
1924             );
1925         };
1926
1927         let adt_def = match *layout.ty.kind() {
1928             ty::Adt(ref adt_def, _) => {
1929                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1930                 adt_def
1931             }
1932
1933             ty::Closure(..) => {
1934                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1935                 record(DataTypeKind::Closure, false, None, vec![]);
1936                 return;
1937             }
1938
1939             _ => {
1940                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1941                 return;
1942             }
1943         };
1944
1945         let adt_kind = adt_def.adt_kind();
1946         let adt_packed = adt_def.repr().pack.is_some();
1947
1948         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1949             let mut min_size = Size::ZERO;
1950             let field_info: Vec<_> = flds
1951                 .iter()
1952                 .enumerate()
1953                 .map(|(i, &name)| {
1954                     let field_layout = layout.field(self, i);
1955                     let offset = layout.fields.offset(i);
1956                     let field_end = offset + field_layout.size;
1957                     if min_size < field_end {
1958                         min_size = field_end;
1959                     }
1960                     FieldInfo {
1961                         name: name.to_string(),
1962                         offset: offset.bytes(),
1963                         size: field_layout.size.bytes(),
1964                         align: field_layout.align.abi.bytes(),
1965                     }
1966                 })
1967                 .collect();
1968
1969             VariantInfo {
1970                 name: n.map(|n| n.to_string()),
1971                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1972                 align: layout.align.abi.bytes(),
1973                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1974                 fields: field_info,
1975             }
1976         };
1977
1978         match layout.variants {
1979             Variants::Single { index } => {
1980                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1981                     debug!(
1982                         "print-type-size `{:#?}` variant {}",
1983                         layout,
1984                         adt_def.variant(index).name
1985                     );
1986                     let variant_def = &adt_def.variant(index);
1987                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1988                     record(
1989                         adt_kind.into(),
1990                         adt_packed,
1991                         None,
1992                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1993                     );
1994                 } else {
1995                     // (This case arises for *empty* enums; so give it
1996                     // zero variants.)
1997                     record(adt_kind.into(), adt_packed, None, vec![]);
1998                 }
1999             }
2000
2001             Variants::Multiple { tag, ref tag_encoding, .. } => {
2002                 debug!(
2003                     "print-type-size `{:#?}` adt general variants def {}",
2004                     layout.ty,
2005                     adt_def.variants().len()
2006                 );
2007                 let variant_infos: Vec<_> = adt_def
2008                     .variants()
2009                     .iter_enumerated()
2010                     .map(|(i, variant_def)| {
2011                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2012                         build_variant_info(
2013                             Some(variant_def.name),
2014                             &fields,
2015                             layout.for_variant(self, i),
2016                         )
2017                     })
2018                     .collect();
2019                 record(
2020                     adt_kind.into(),
2021                     adt_packed,
2022                     match tag_encoding {
2023                         TagEncoding::Direct => Some(tag.size(self)),
2024                         _ => None,
2025                     },
2026                     variant_infos,
2027                 );
2028             }
2029         }
2030     }
2031 }
2032
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    /// The exact size in bytes is known (the type's layout succeeded).
    Known(Size),

    /// A potentially-fat pointer.
    /// Used when the full layout could not be computed (e.g. the pointee's
    /// metadata depends on a type parameter), but the value is still known
    /// to be pointer-shaped.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
2052
impl<'tcx> SizeSkeleton<'tcx> {
    /// Tries to compute a size skeleton for `ty`.
    ///
    /// First attempts a full static layout; on success the exact size is
    /// returned as `SizeSkeleton::Known`. If layout errors, falls back to
    /// recognizing references/raw pointers, pointer newtypes, and two-variant
    /// enums eligible for the null-pointer optimization; any other shape
    /// propagates the original `LayoutError`.
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match *ty.kind() {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // References are always non-null; raw pointers may be null.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        // Layout can only have failed because the tail is generic.
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(pointer_skeleton))` if the variant is a
                // single pointer plus only zero-sized fields, `Ok(None)` if it
                // is entirely zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields =
                        def.variant(i).fields.iter().map(|field| {
                            SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                        });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                // More than one pointer field disqualifies the variant.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants().len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range_*` attribute that
                            // excludes zero also makes the pointer non-null
                            // (e.g. `NonNull<T>`).
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did()) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                // Exactly one variant holds a non-null pointer and the other is
                // zero-sized, so the enum is pointer-sized with null as the
                // discriminant for the empty variant.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    // Normalization made no progress; give up.
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    /// Whether two skeletons are guaranteed to describe types of equal size:
    /// either both sizes are statically known and equal, or both are pointers
    /// with the same unsized tail (`non_zero` does not affect size).
    pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
2175
/// Trait for contexts that can provide a `TyCtxt`.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}
2179
/// Trait for contexts that can provide the `ParamEnv` to use for queries.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
2183
impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        // `TyCtxt` exposes the target's data layout as a field.
        &self.data_layout
    }
}
2190
impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
    fn target_spec(&self) -> &Target {
        // The target spec lives on the session.
        &self.sess.target
    }
}
2196
impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        // `TyCtxt` is `Copy`, so it simply returns itself.
        *self
    }
}
2203
impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        // Forwards through `TyCtxtAt`'s deref to the underlying `TyCtxt`.
        &self.data_layout
    }
}
2210
impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
    fn target_spec(&self) -> &Target {
        // Forwards through `TyCtxtAt`'s deref to the session's target spec.
        &self.sess.target
    }
}
2216
impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        // Double-deref: `TyCtxtAt` derefs to `TyCtxt`, which is `Copy`.
        **self
    }
}
2223
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        // `LayoutCx` carries its `ParamEnv` directly.
        self.param_env
    }
}
2229
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        // Delegates to the wrapped context (`TyCtxt` or `TyCtxtAt`).
        self.tcx.data_layout()
    }
}
2235
impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
    fn target_spec(&self) -> &Target {
        // Delegates to the wrapped context.
        self.tcx.target_spec()
    }
}
2241
impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        // Delegates to the wrapped context.
        self.tcx.tcx()
    }
}
2247
/// Abstraction over "a `T`" vs. "a `Result<T, E>`", so that layout/ABI code
/// can be written once and used both by contexts that cannot fail (where the
/// result is `T` itself) and by fallible ones (where it is a `Result`).
pub trait MaybeResult<T> {
    /// The error type; `!` for the infallible identity impl.
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}
2254
2255 impl<T> MaybeResult<T> for T {
2256     type Error = !;
2257
2258     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2259         x
2260     }
2261     fn to_result(self) -> Result<T, Self::Error> {
2262         Ok(self)
2263     }
2264 }
2265
/// Fallible case: a `Result<T, E>` is already in the shape `MaybeResult`
/// describes, so both conversions are the identity.
impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
2276
/// `rustc_target`'s generic `TyAndLayout` instantiated with this crate's `Ty`.
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2278
/// Trait for contexts that want to be able to compute layouts of types.
/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
    /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
    /// returned from `layout_of` (see also `handle_layout_err`).
    type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;

    /// `Span` to use for `tcx.at(span)`, from `layout_of`.
    // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        DUMMY_SP
    }

    /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
    /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
    /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
    /// (and any `LayoutError`s are turned into fatal errors or ICEs).
    ///
    /// `span` and `ty` identify the failing layout request, for diagnostics.
    fn handle_layout_err(
        &self,
        err: LayoutError<'tcx>,
        span: Span,
        ty: Ty<'tcx>,
    ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
}
2307
2308 /// Blanket extension trait for contexts that can compute layouts of types.
2309 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2310     /// Computes the layout of a type. Note that this implicitly
2311     /// executes in "reveal all" mode, and will normalize the input type.
2312     #[inline]
2313     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2314         self.spanned_layout_of(ty, DUMMY_SP)
2315     }
2316
2317     /// Computes the layout of a type, at `span`. Note that this implicitly
2318     /// executes in "reveal all" mode, and will normalize the input type.
2319     // FIXME(eddyb) avoid passing information like this, and instead add more
2320     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2321     #[inline]
2322     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2323         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2324         let tcx = self.tcx().at(span);
2325
2326         MaybeResult::from(
2327             tcx.layout_of(self.param_env().and(ty))
2328                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2329         )
2330     }
2331 }
2332
// Blanket impl: implementing `LayoutOfHelpers` is all a context needs to do.
impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2334
impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Propagates layout errors to the caller unchanged.
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2343
impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Uses the span tracked by the `TyCtxtAt` context.
    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        self.tcx.span
    }

    /// Propagates layout errors to the caller unchanged.
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2357
2358 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2359 where
2360     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2361 {
2362     fn ty_and_layout_for_variant(
2363         this: TyAndLayout<'tcx>,
2364         cx: &C,
2365         variant_index: VariantIdx,
2366     ) -> TyAndLayout<'tcx> {
2367         let layout = match this.variants {
2368             Variants::Single { index }
2369                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2370                 if index == variant_index &&
2371                 // Don't confuse variants of uninhabited enums with the enum itself.
2372                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2373                 this.fields != FieldsShape::Primitive =>
2374             {
2375                 this.layout
2376             }
2377
2378             Variants::Single { index } => {
2379                 let tcx = cx.tcx();
2380                 let param_env = cx.param_env();
2381
2382                 // Deny calling for_variant more than once for non-Single enums.
2383                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2384                     assert_eq!(original_layout.variants, Variants::Single { index });
2385                 }
2386
2387                 let fields = match this.ty.kind() {
2388                     ty::Adt(def, _) if def.variants().is_empty() =>
2389                         bug!("for_variant called on zero-variant enum"),
2390                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2391                     _ => bug!(),
2392                 };
2393                 tcx.intern_layout(LayoutS {
2394                     variants: Variants::Single { index: variant_index },
2395                     fields: match NonZeroUsize::new(fields) {
2396                         Some(fields) => FieldsShape::Union(fields),
2397                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2398                     },
2399                     abi: Abi::Uninhabited,
2400                     largest_niche: None,
2401                     align: tcx.data_layout.i8_align,
2402                     size: Size::ZERO,
2403                 })
2404             }
2405
2406             Variants::Multiple { ref variants, .. } => variants[variant_index],
2407         };
2408
2409         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2410
2411         TyAndLayout { ty: this.ty, layout }
2412     }
2413
2414     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2415         enum TyMaybeWithLayout<'tcx> {
2416             Ty(Ty<'tcx>),
2417             TyAndLayout(TyAndLayout<'tcx>),
2418         }
2419
2420         fn field_ty_or_layout<'tcx>(
2421             this: TyAndLayout<'tcx>,
2422             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2423             i: usize,
2424         ) -> TyMaybeWithLayout<'tcx> {
2425             let tcx = cx.tcx();
2426             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2427                 TyAndLayout {
2428                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2429                     ty: tag.primitive().to_ty(tcx),
2430                 }
2431             };
2432
2433             match *this.ty.kind() {
2434                 ty::Bool
2435                 | ty::Char
2436                 | ty::Int(_)
2437                 | ty::Uint(_)
2438                 | ty::Float(_)
2439                 | ty::FnPtr(_)
2440                 | ty::Never
2441                 | ty::FnDef(..)
2442                 | ty::GeneratorWitness(..)
2443                 | ty::Foreign(..)
2444                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2445
2446                 // Potentially-fat pointers.
2447                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2448                     assert!(i < this.fields.count());
2449
2450                     // Reuse the fat `*T` type as its own thin pointer data field.
2451                     // This provides information about, e.g., DST struct pointees
2452                     // (which may have no non-DST form), and will work as long
2453                     // as the `Abi` or `FieldsShape` is checked by users.
2454                     if i == 0 {
2455                         let nil = tcx.mk_unit();
2456                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2457                             tcx.mk_mut_ptr(nil)
2458                         } else {
2459                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2460                         };
2461
2462                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2463                         // the `Result` should always work because the type is
2464                         // always either `*mut ()` or `&'static mut ()`.
2465                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2466                             ty: this.ty,
2467                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2468                         });
2469                     }
2470
2471                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2472                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2473                         ty::Dynamic(_, _) => {
2474                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2475                                 tcx.lifetimes.re_static,
2476                                 tcx.mk_array(tcx.types.usize, 3),
2477                             ))
2478                             /* FIXME: use actual fn pointers
2479                             Warning: naively computing the number of entries in the
2480                             vtable by counting the methods on the trait + methods on
2481                             all parent traits does not work, because some methods can
2482                             be not object safe and thus excluded from the vtable.
2483                             Increase this counter if you tried to implement this but
2484                             failed to do it without duplicating a lot of code from
2485                             other places in the compiler: 2
2486                             tcx.mk_tup(&[
2487                                 tcx.mk_array(tcx.types.usize, 3),
2488                                 tcx.mk_array(Option<fn()>),
2489                             ])
2490                             */
2491                         }
2492                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2493                     }
2494                 }
2495
2496                 // Arrays and slices.
2497                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2498                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2499
2500                 // Tuples, generators and closures.
2501                 ty::Closure(_, ref substs) => field_ty_or_layout(
2502                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2503                     cx,
2504                     i,
2505                 ),
2506
2507                 ty::Generator(def_id, ref substs, _) => match this.variants {
2508                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2509                         substs
2510                             .as_generator()
2511                             .state_tys(def_id, tcx)
2512                             .nth(index.as_usize())
2513                             .unwrap()
2514                             .nth(i)
2515                             .unwrap(),
2516                     ),
2517                     Variants::Multiple { tag, tag_field, .. } => {
2518                         if i == tag_field {
2519                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2520                         }
2521                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2522                     }
2523                 },
2524
2525                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2526
2527                 // ADTs.
2528                 ty::Adt(def, substs) => {
2529                     match this.variants {
2530                         Variants::Single { index } => {
2531                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2532                         }
2533
2534                         // Discriminant field for enums (where applicable).
2535                         Variants::Multiple { tag, .. } => {
2536                             assert_eq!(i, 0);
2537                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2538                         }
2539                     }
2540                 }
2541
2542                 ty::Projection(_)
2543                 | ty::Bound(..)
2544                 | ty::Placeholder(..)
2545                 | ty::Opaque(..)
2546                 | ty::Param(_)
2547                 | ty::Infer(_)
2548                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2549             }
2550         }
2551
2552         match field_ty_or_layout(this, cx, i) {
2553             TyMaybeWithLayout::Ty(field_ty) => {
2554                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2555                     bug!(
2556                         "failed to get layout for `{}`: {},\n\
2557                          despite it being a field (#{}) of an existing layout: {:#?}",
2558                         field_ty,
2559                         e,
2560                         i,
2561                         this
2562                     )
2563                 })
2564             }
2565             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2566         }
2567     }
2568
2569     fn ty_and_layout_pointee_info_at(
2570         this: TyAndLayout<'tcx>,
2571         cx: &C,
2572         offset: Size,
2573     ) -> Option<PointeeInfo> {
2574         let tcx = cx.tcx();
2575         let param_env = cx.param_env();
2576
2577         let addr_space_of_ty = |ty: Ty<'tcx>| {
2578             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2579         };
2580
2581         let pointee_info = match *this.ty.kind() {
2582             ty::RawPtr(mt) if offset.bytes() == 0 => {
2583                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2584                     size: layout.size,
2585                     align: layout.align.abi,
2586                     safe: None,
2587                     address_space: addr_space_of_ty(mt.ty),
2588                 })
2589             }
2590             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2591                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2592                     size: layout.size,
2593                     align: layout.align.abi,
2594                     safe: None,
2595                     address_space: cx.data_layout().instruction_address_space,
2596                 })
2597             }
2598             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2599                 let address_space = addr_space_of_ty(ty);
2600                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2601                     // Use conservative pointer kind if not optimizing. This saves us the
2602                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2603                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2604                     PointerKind::Shared
2605                 } else {
2606                     match mt {
2607                         hir::Mutability::Not => {
2608                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2609                                 PointerKind::Frozen
2610                             } else {
2611                                 PointerKind::Shared
2612                             }
2613                         }
2614                         hir::Mutability::Mut => {
2615                             // References to self-referential structures should not be considered
2616                             // noalias, as another pointer to the structure can be obtained, that
2617                             // is not based-on the original reference. We consider all !Unpin
2618                             // types to be potentially self-referential here.
2619                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2620                                 PointerKind::UniqueBorrowed
2621                             } else {
2622                                 PointerKind::Shared
2623                             }
2624                         }
2625                     }
2626                 };
2627
2628                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2629                     size: layout.size,
2630                     align: layout.align.abi,
2631                     safe: Some(kind),
2632                     address_space,
2633                 })
2634             }
2635
2636             _ => {
2637                 let mut data_variant = match this.variants {
2638                     // Within the discriminant field, only the niche itself is
2639                     // always initialized, so we only check for a pointer at its
2640                     // offset.
2641                     //
2642                     // If the niche is a pointer, it's either valid (according
2643                     // to its type), or null (which the niche field's scalar
2644                     // validity range encodes).  This allows using
2645                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2646                     // this will continue to work as long as we don't start
2647                     // using more niches than just null (e.g., the first page of
2648                     // the address space, or unaligned pointers).
2649                     Variants::Multiple {
2650                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2651                         tag_field,
2652                         ..
2653                     } if this.fields.offset(tag_field) == offset => {
2654                         Some(this.for_variant(cx, dataful_variant))
2655                     }
2656                     _ => Some(this),
2657                 };
2658
2659                 if let Some(variant) = data_variant {
2660                     // We're not interested in any unions.
2661                     if let FieldsShape::Union(_) = variant.fields {
2662                         data_variant = None;
2663                     }
2664                 }
2665
2666                 let mut result = None;
2667
2668                 if let Some(variant) = data_variant {
2669                     let ptr_end = offset + Pointer.size(cx);
2670                     for i in 0..variant.fields.count() {
2671                         let field_start = variant.fields.offset(i);
2672                         if field_start <= offset {
2673                             let field = variant.field(cx, i);
2674                             result = field.to_result().ok().and_then(|field| {
2675                                 if ptr_end <= field_start + field.size {
2676                                     // We found the right field, look inside it.
2677                                     let field_info =
2678                                         field.pointee_info_at(cx, offset - field_start);
2679                                     field_info
2680                                 } else {
2681                                     None
2682                                 }
2683                             });
2684                             if result.is_some() {
2685                                 break;
2686                             }
2687                         }
2688                     }
2689                 }
2690
2691                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2692                 if let Some(ref mut pointee) = result {
2693                     if let ty::Adt(def, _) = this.ty.kind() {
2694                         if def.is_box() && offset.bytes() == 0 {
2695                             pointee.safe = Some(PointerKind::UniqueOwned);
2696                         }
2697                     }
2698                 }
2699
2700                 result
2701             }
2702         };
2703
2704         debug!(
2705             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2706             offset,
2707             this.ty.kind(),
2708             pointee_info
2709         );
2710
2711         pointee_info
2712     }
2713
2714     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2715         matches!(this.ty.kind(), ty::Adt(..))
2716     }
2717
2718     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2719         this.ty.kind() == &ty::Never
2720     }
2721
2722     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2723         matches!(this.ty.kind(), ty::Tuple(..))
2724     }
2725
2726     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2727         matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2728     }
2729 }
2730
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `fn_abi_of_instance` might be performing.
    //
    // Computes the ABI-level signature for this instance: for plain `FnDef`s
    // this is the (normalized, substituted) declared signature; for closures
    // and generators it is the *call* signature, with the environment
    // prepended as an explicit first argument.
    fn fn_sig_for_fn_abi(
        &self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> ty::PolyFnSig<'tcx> {
        let ty = self.ty(tcx, param_env);
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                // Append a fresh `BrEnv` late-bound region for the borrow of
                // the closure environment; `bound_vars.len() - 1` below refers
                // to exactly this appended variable.
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();

                // Prepend the environment as the first (ABI-level) argument.
                let sig = sig.skip_binder();
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    ),
                    bound_vars,
                )
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                // Same `BrEnv` trick as the closure case above.
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                // The environment is passed as `Pin<&mut Self>`.
                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                // The resulting call signature is:
                // `fn(Pin<&mut Self>, ResumeTy) -> GeneratorState<YieldTy, ReturnTy>`.
                let sig = sig.skip_binder();
                let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    ),
                    bound_vars,
                )
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
2835
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
///
/// * `codegen_fn_attr_flags` - these are flags calculated as part of the
///   codegen attrs for a defined function. For function pointers this set of
///   flags is the empty set. This is only applicable for Rust-defined
///   functions, and generally isn't needed except for small optimizations where
///   we try to say a function which otherwise might look like it could unwind
///   doesn't actually unwind (such as for intrinsics and such).
///
/// * `abi` - this is the ABI that the function is defined with. This is the
///   primary factor for determining whether a function can unwind or not.
///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
/// defined for each ABI individually, but it always corresponds to some form of
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
///
/// Rust functions are classified whether or not they can unwind based on the
/// active "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
/// only if the final panic mode is panic=abort. In this scenario any code
/// previously compiled assuming that a function can unwind is still correct, it
/// just never happens to actually unwind at runtime.
///
/// This function's answer to whether or not a function can unwind is quite
/// impactful throughout the compiler. This affects things like:
///
/// * Calling a function which can't unwind means codegen simply ignores any
///   associated unwinding cleanup.
/// * Calling a function which can unwind from a function which can't unwind
///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
///   aborts the process.
/// * This affects whether functions have the LLVM `nounwind` attribute, which
///   affects various optimizations and codegen.
///
/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
#[inline]
pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
    if let Some(did) = fn_def_id {
        // Special attribute for functions which can't unwind.
        if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
            return false;
        }

        // With `-C panic=abort`, all non-FFI functions are required to not unwind.
        //
        // Note that this is true regardless of the ABI specified on the function -- a
        // `extern "C-unwind"` function defined in Rust is also required to abort.
        if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
            return false;
        }

        // With -Z panic-in-drop=abort, drop_in_place never unwinds.
        //
        // This is not part of `codegen_fn_attrs` as it can differ between crates
        // and therefore cannot be computed in core.
        if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
            if Some(did) == tcx.lang_items().drop_in_place_fn() {
                return false;
            }
        }
    }

    // Otherwise if this isn't special then unwinding is generally determined by
    // the ABI of the function itself. ABIs like `C` have variants which also
    // specifically allow unwinding (`C-unwind`), but not all platform-specific
    // ABIs have such an option. Otherwise the only other thing here is Rust
    // itself, and those ABIs are determined by the panic strategy configured
    // for this compilation.
    //
    // Unfortunately at this time there's also another caveat. Rust [RFC
    // 2945][rfc] has been accepted and is in the process of being implemented
    // and stabilized. In this interim state we need to deal with historical
    // rustc behavior as well as plan for future rustc behavior.
    //
    // Historically functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
    // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared functions as well as Rust-defined functions.
    //
    // To fix this UB rustc would like to change in the future to catch unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function and forcibly abort the process, thereby respecting the
    // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
    // ready to roll out, so determining whether or not the `C` family of ABIs
    // unwinds is conditional not only on their definition but also whether the
    // `#![feature(c_unwind)]` feature gate is active.
    //
    // Note that this means that unlike historical compilers rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
    // prevent some optimization opportunities, however, so we try to scope this
    // change and only assume that `C` unwinds with `panic=unwind` (as opposed
    // to `panic=abort`).
    //
    // Eventually the check against `c_unwind` here will ideally get removed and
    // this'll be a little cleaner as it'll be a straightforward check of the
    // ABI.
    //
    // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
    use SpecAbi::*;
    match abi {
        C { unwind }
        | System { unwind }
        | Cdecl { unwind }
        | Stdcall { unwind }
        | Fastcall { unwind }
        | Vectorcall { unwind }
        | Thiscall { unwind }
        | Aapcs { unwind }
        | Win64 { unwind }
        | SysV64 { unwind } => {
            unwind
                || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
        }
        PtxKernel
        | Msp430Interrupt
        | X86Interrupt
        | AmdGpuKernel
        | EfiApi
        | AvrInterrupt
        | AvrNonBlockingInterrupt
        | CCmseNonSecureCall
        | Wasm
        | RustIntrinsic
        | PlatformIntrinsic
        | Unadjusted => false,
        Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
    }
}
2976
2977 #[inline]
2978 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2979     use rustc_target::spec::abi::Abi::*;
2980     match tcx.sess.target.adjust_abi(abi) {
2981         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2982         RustCold => Conv::RustCold,
2983
2984         // It's the ABI's job to select this, not ours.
2985         System { .. } => bug!("system abi should be selected elsewhere"),
2986         EfiApi => bug!("eficall abi should be selected elsewhere"),
2987
2988         Stdcall { .. } => Conv::X86Stdcall,
2989         Fastcall { .. } => Conv::X86Fastcall,
2990         Vectorcall { .. } => Conv::X86VectorCall,
2991         Thiscall { .. } => Conv::X86ThisCall,
2992         C { .. } => Conv::C,
2993         Unadjusted => Conv::C,
2994         Win64 { .. } => Conv::X86_64Win64,
2995         SysV64 { .. } => Conv::X86_64SysV,
2996         Aapcs { .. } => Conv::ArmAapcs,
2997         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2998         PtxKernel => Conv::PtxKernel,
2999         Msp430Interrupt => Conv::Msp430Intr,
3000         X86Interrupt => Conv::X86Intr,
3001         AmdGpuKernel => Conv::AmdGpuKernel,
3002         AvrInterrupt => Conv::AvrInterrupt,
3003         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3004         Wasm => Conv::C,
3005
3006         // These API constants ought to be more specific...
3007         Cdecl { .. } => Conv::C,
3008     }
3009 }
3010
/// Error produced by attempting to compute or adjust a `FnAbi`.
///
/// Both variants have `From` conversions, so `?` can propagate either
/// source error into a `Result<_, FnAbiError<'_>>`.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
    /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
    Layout(LayoutError<'tcx>),

    /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
    AdjustForForeignAbi(call::AdjustForForeignAbiError),
}
3020
3021 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3022     fn from(err: LayoutError<'tcx>) -> Self {
3023         Self::Layout(err)
3024     }
3025 }
3026
3027 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3028     fn from(err: call::AdjustForForeignAbiError) -> Self {
3029         Self::AdjustForForeignAbi(err)
3030     }
3031 }
3032
3033 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3034     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3035         match self {
3036             Self::Layout(err) => err.fmt(f),
3037             Self::AdjustForForeignAbi(err) => err.fmt(f),
3038         }
3039     }
3040 }
3041
// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
// just for error handling.
/// Description of the `fn_abi_of_*` request that failed, passed to
/// `FnAbiOfHelpers::handle_fn_abi_err` so implementors know which query
/// produced the error.
#[derive(Debug)]
pub enum FnAbiRequest<'tcx> {
    /// A `fn_abi_of_fn_ptr` request (indirect calls through `fn` pointers).
    OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
    /// A `fn_abi_of_instance` request (declaring/defining or directly calling an `Instance`).
    OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
}
3049
/// Trait for contexts that want to be able to compute `FnAbi`s.
/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
    /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
    /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
    type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;

    /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
    /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `&FnAbi` from its
    /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
    /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
    ///
    /// The `span` and `fn_abi_request` arguments identify where the request
    /// originated and what it was, so implementors can produce diagnostics.
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
}
3071
3072 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3073 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3074     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3075     ///
3076     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3077     /// instead, where the instance is an `InstanceDef::Virtual`.
3078     #[inline]
3079     fn fn_abi_of_fn_ptr(
3080         &self,
3081         sig: ty::PolyFnSig<'tcx>,
3082         extra_args: &'tcx ty::List<Ty<'tcx>>,
3083     ) -> Self::FnAbiOfResult {
3084         // FIXME(eddyb) get a better `span` here.
3085         let span = self.layout_tcx_at_span();
3086         let tcx = self.tcx().at(span);
3087
3088         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3089             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3090         ))
3091     }
3092
3093     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3094     /// direct calls to an `fn`.
3095     ///
3096     /// NB: that includes virtual calls, which are represented by "direct calls"
3097     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3098     #[inline]
3099     fn fn_abi_of_instance(
3100         &self,
3101         instance: ty::Instance<'tcx>,
3102         extra_args: &'tcx ty::List<Ty<'tcx>>,
3103     ) -> Self::FnAbiOfResult {
3104         // FIXME(eddyb) get a better `span` here.
3105         let span = self.layout_tcx_at_span();
3106         let tcx = self.tcx().at(span);
3107
3108         MaybeResult::from(
3109             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3110                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3111                 // we can get some kind of span even if one wasn't provided.
3112                 // However, we don't do this early in order to avoid calling
3113                 // `def_span` unconditionally (which may have a perf penalty).
3114                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3115                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3116             }),
3117         )
3118     }
3119 }
3120
// Blanket impl: any context implementing `FnAbiOfHelpers` gets the
// `FnAbiOf` extension methods for free.
impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
3122
3123 fn fn_abi_of_fn_ptr<'tcx>(
3124     tcx: TyCtxt<'tcx>,
3125     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3126 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3127     let (param_env, (sig, extra_args)) = query.into_parts();
3128
3129     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3130 }
3131
3132 fn fn_abi_of_instance<'tcx>(
3133     tcx: TyCtxt<'tcx>,
3134     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3135 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3136     let (param_env, (instance, extra_args)) = query.into_parts();
3137
3138     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3139
3140     let caller_location = if instance.def.requires_caller_location(tcx) {
3141         Some(tcx.caller_location_ty())
3142     } else {
3143         None
3144     };
3145
3146     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3147         sig,
3148         extra_args,
3149         caller_location,
3150         Some(instance.def_id()),
3151         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3152     )
3153 }
3154
3155 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method, into a separate `struct`.
    /// Computes a fresh `FnAbi` for `sig` (before arena-allocating it).
    ///
    /// * `extra_args` - argument types beyond the signature's own inputs
    ///   (asserted empty unless the signature is C-variadic).
    /// * `caller_location` - if `Some`, appended as an extra trailing argument
    ///   (used for `#[track_caller]`, see `fn_abi_of_instance`).
    /// * `fn_def_id` - `DefId` of the function if known (`None` for `fn`
    ///   pointers); only consulted by `fn_can_unwind`.
    /// * `force_thin_self_ptr` - pass the first (`self`) argument as a thin
    ///   pointer (used for `InstanceDef::Virtual` calls).
    fn fn_abi_new_uncached(
        &self,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        fn_def_id: Option<DefId>,
        // FIXME(eddyb) replace this with something typed, like an `enum`.
        force_thin_self_ptr: bool,
    ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
        debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);

        let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);

        let conv = conv_from_spec_abi(self.tcx(), sig.abi);

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            // "rust-call" functions tuple their trailing arguments: untuple
            // the final input here so each element becomes its own ABI-level
            // extra argument.
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                            is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                        is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args
        };

        // Target quirks consulted below when deciding whether ZST arguments
        // can be ignored.
        let target = &self.tcx.sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        use SpecAbi::*;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always a noundef i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                attrs.set(ArgAttribute::NoUndef);
                return;
            }

            // Scalars which have invalid values cannot be undef.
            if !scalar.is_always_valid(self) {
                attrs.set(ArgAttribute::NoUndef);
            }

            // Only pointer types handled below.
            let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };

            if !valid_range.contains(0) {
                attrs.set(ArgAttribute::NonNull);
            }

            if let Some(pointee) = layout.pointee_info_at(self, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`PointerKind::UniqueOwned`) is not necessarily
                    // dereferenceable for the entire duration of the function as
                    // it can be deallocated at any time. Set its valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box`, `&T`, and `&mut T` cannot be undef.
                    // Note that this only applies to the value of the pointer itself;
                    // this attribute doesn't make it UB for the pointed-to data to be undef.
                    attrs.set(ArgAttribute::NoUndef);

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    //
                    // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
                    // for UniqueBorrowed arguments, so that the codegen backend can decide whether
                    // or not to actually emit the attribute. It can also be controlled with the
                    // `-Zmutable-noalias` debugging option.
                    let no_alias = match kind {
                        PointerKind::Shared | PointerKind::UniqueBorrowed => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }

                    if kind == PointerKind::UniqueBorrowed && !is_return {
                        attrs.set(ArgAttribute::NoAliasMutRef);
                    }
                }
            }
        };

        // Builds the `ArgAbi` for one argument; `arg_idx == None` means the
        // return "argument", real arguments are numbered from 0.
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
            let is_return = arg_idx.is_none();

            let layout = self.layout_of(ty)?;
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
                make_thin_self_ptr(self, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            Ok(arg)
        };

        // Arguments are: declared inputs, then untupled/variadic extras, then
        // the optional `#[track_caller]` location.
        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None)?,
            args: inputs
                .iter()
                .copied()
                .chain(extra_args.iter().copied())
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect::<Result<_, _>>()?,
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
        };
        self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
        debug!("fn_abi_new_uncached = {:?}", fn_abi);
        Ok(self.tcx.arena.alloc(fn_abi))
    }
3341
3342     fn fn_abi_adjust_for_abi(
3343         &self,
3344         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3345         abi: SpecAbi,
3346     ) -> Result<(), FnAbiError<'tcx>> {
3347         if abi == SpecAbi::Unadjusted {
3348             return Ok(());
3349         }
3350
3351         if abi == SpecAbi::Rust
3352             || abi == SpecAbi::RustCall
3353             || abi == SpecAbi::RustIntrinsic
3354             || abi == SpecAbi::PlatformIntrinsic
3355         {
3356             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3357                 if arg.is_ignore() {
3358                     return;
3359                 }
3360
3361                 match arg.layout.abi {
3362                     Abi::Aggregate { .. } => {}
3363
3364                     // This is a fun case! The gist of what this is doing is
3365                     // that we want callers and callees to always agree on the
3366                     // ABI of how they pass SIMD arguments. If we were to *not*
3367                     // make these arguments indirect then they'd be immediates
3368                     // in LLVM, which means that they'd used whatever the
3369                     // appropriate ABI is for the callee and the caller. That
3370                     // means, for example, if the caller doesn't have AVX
3371                     // enabled but the callee does, then passing an AVX argument
3372                     // across this boundary would cause corrupt data to show up.
3373                     //
3374                     // This problem is fixed by unconditionally passing SIMD
3375                     // arguments through memory between callers and callees
3376                     // which should get them all to agree on ABI regardless of
3377                     // target feature sets. Some more information about this
3378                     // issue can be found in #44367.
3379                     //
3380                     // Note that the platform intrinsic ABI is exempt here as
3381                     // that's how we connect up to LLVM and it's unstable
3382                     // anyway, we control all calls to it in libstd.
3383                     Abi::Vector { .. }
3384                         if abi != SpecAbi::PlatformIntrinsic
3385                             && self.tcx.sess.target.simd_types_indirect =>
3386                     {
3387                         arg.make_indirect();
3388                         return;
3389                     }
3390
3391                     _ => return,
3392                 }
3393
3394                 let size = arg.layout.size;
3395                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3396                     arg.make_indirect();
3397                 } else {
3398                     // We want to pass small aggregates as immediates, but using
3399                     // a LLVM aggregate type for this leads to bad optimizations,
3400                     // so we pick an appropriately sized integer type instead.
3401                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3402                 }
3403             };
3404             fixup(&mut fn_abi.ret);
3405             for arg in &mut fn_abi.args {
3406                 fixup(arg);
3407             }
3408         } else {
3409             fn_abi.adjust_for_foreign_abi(self, abi)?;
3410         }
3411
3412         Ok(())
3413     }
3414 }
3415
3416 fn make_thin_self_ptr<'tcx>(
3417     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3418     layout: TyAndLayout<'tcx>,
3419 ) -> TyAndLayout<'tcx> {
3420     let tcx = cx.tcx();
3421     let fat_pointer_ty = if layout.is_unsized() {
3422         // unsized `self` is passed as a pointer to `self`
3423         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3424         tcx.mk_mut_ptr(layout.ty)
3425     } else {
3426         match layout.abi {
3427             Abi::ScalarPair(..) => (),
3428             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3429         }
3430
3431         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3432         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3433         // elsewhere in the compiler as a method on a `dyn Trait`.
3434         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3435         // get a built-in pointer type
3436         let mut fat_pointer_layout = layout;
3437         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3438             && !fat_pointer_layout.ty.is_region_ptr()
3439         {
3440             for i in 0..fat_pointer_layout.fields.count() {
3441                 let field_layout = fat_pointer_layout.field(cx, i);
3442
3443                 if !field_layout.is_zst() {
3444                     fat_pointer_layout = field_layout;
3445                     continue 'descend_newtypes;
3446                 }
3447             }
3448
3449             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3450         }
3451
3452         fat_pointer_layout.ty
3453     };
3454
3455     // we now have a type like `*mut RcBox<dyn Trait>`
3456     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3457     // this is understood as a special case elsewhere in the compiler
3458     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3459
3460     TyAndLayout {
3461         ty: fat_pointer_ty,
3462
3463         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3464         // should always work because the type is always `*mut ()`.
3465         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3466     }
3467 }