[rust.git] / compiler / rustc_ty_utils / src / layout.rs (blob 07af3dc5164786208a9e05784f5cdc169a4c8f47)
1 use rustc_hir as hir;
2 use rustc_index::bit_set::BitSet;
3 use rustc_index::vec::{Idx, IndexVec};
4 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
5 use rustc_middle::ty::layout::{
6     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
7 };
8 use rustc_middle::ty::{
9     self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
10 };
11 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
12 use rustc_span::symbol::Symbol;
13 use rustc_span::DUMMY_SP;
14 use rustc_target::abi::*;
15
16 use std::cmp::{self, Ordering};
17 use std::iter;
18 use std::num::NonZeroUsize;
19 use std::ops::Bound;
20
21 use rand::{seq::SliceRandom, SeedableRng};
22 use rand_xoshiro::Xoshiro128StarStar;
23
24 use crate::layout_sanity_check::sanity_check_layout;
25
26 pub fn provide(providers: &mut ty::query::Providers) {
27     *providers = ty::query::Providers { layout_of, ..*providers };
28 }
29
30 #[instrument(skip(tcx, query), level = "debug")]
31 fn layout_of<'tcx>(
32     tcx: TyCtxt<'tcx>,
33     query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
34 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
35     let (param_env, ty) = query.into_parts();
36     debug!(?ty);
37
38     let param_env = param_env.with_reveal_all_normalized(tcx);
39     let unnormalized_ty = ty;
40
41     // FIXME: We might want to have two different versions of `layout_of`:
42     // One that can be called after typecheck has completed and can use
43     // `normalize_erasing_regions` here and another one that can be called
44     // before typecheck has completed and uses `try_normalize_erasing_regions`.
45     let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
46         Ok(t) => t,
47         Err(normalization_error) => {
48             return Err(LayoutError::NormalizationFailure(ty, normalization_error));
49         }
50     };
51
52     if ty != unnormalized_ty {
53         // Ensure this layout is also cached for the normalized type.
54         return tcx.layout_of(param_env.and(ty));
55     }
56
57     let cx = LayoutCx { tcx, param_env };
58
59     let layout = layout_of_uncached(&cx, ty)?;
60     let layout = TyAndLayout { ty, layout };
61
62     record_layout_for_printing(&cx, layout);
63
64     sanity_check_layout(&cx, &layout);
65
66     Ok(layout)
67 }
68
69 #[derive(Copy, Clone, Debug)]
70 enum StructKind {
71     /// A tuple, closure, or univariant which cannot be coerced to unsized.
72     AlwaysSized,
73     /// A univariant, the last field of which may be coerced to unsized.
74     MaybeUnsized,
75     /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
76     Prefixed(Size, Align),
77 }
78
79 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
80 // This is used to go between `memory_index` (source field order to memory order)
81 // and `inverse_memory_index` (memory order to source field order).
82 // See also `FieldsShape::Arbitrary::memory_index` for more details.
83 // FIXME(eddyb) build a better abstraction for permutations, if possible.
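// For example (illustrative): if `map = [2, 0, 1]` (field 0 placed at memory
// position 2, field 1 at position 0, field 2 at position 1), then
// `invert_mapping(&map) == vec![1, 2, 0]`.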
84 fn invert_mapping(map: &[u32]) -> Vec<u32> {
85     let mut inverse = vec![0; map.len()];
86     for i in 0..map.len() {
87         inverse[map[i] as usize] = i as u32;
88     }
89     inverse
90 }
91
92 fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
93     let dl = cx.data_layout();
94     let b_align = b.align(dl);
95     let align = a.align(dl).max(b_align).max(dl.aggregate_align);
96     let b_offset = a.size(dl).align_to(b_align.abi);
97     let size = (b_offset + b.size(dl)).align_to(align.abi);
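    // For instance (illustrative, on a typical 64-bit target): pairing an `i8` scalar
    // with an `i32` scalar gives `b_offset == 4` and `size == 8`, with 3 bytes of
    // padding between `a` and `b`.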
98
99     // HACK(nox): We iterate over `b` and then `a` because `max_by_key`
100     // returns the last maximum.
101     let largest_niche = Niche::from_scalar(dl, b_offset, b)
102         .into_iter()
103         .chain(Niche::from_scalar(dl, Size::ZERO, a))
104         .max_by_key(|niche| niche.available(dl));
105
106     LayoutS {
107         variants: Variants::Single { index: VariantIdx::new(0) },
108         fields: FieldsShape::Arbitrary {
109             offsets: vec![Size::ZERO, b_offset],
110             memory_index: vec![0, 1],
111         },
112         abi: Abi::ScalarPair(a, b),
113         largest_niche,
114         align,
115         size,
116     }
117 }
118
119 fn univariant_uninterned<'tcx>(
120     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
121     ty: Ty<'tcx>,
122     fields: &[TyAndLayout<'_>],
123     repr: &ReprOptions,
124     kind: StructKind,
125 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
126     let dl = cx.data_layout();
127     let pack = repr.pack;
128     if pack.is_some() && repr.align.is_some() {
129         cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
130         return Err(LayoutError::Unknown(ty));
131     }
132
133     let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
134
135     let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
136
137     let optimize = !repr.inhibit_struct_field_reordering_opt();
138     if optimize {
139         let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
140         let optimizing = &mut inverse_memory_index[..end];
141         let effective_field_align = |f: &TyAndLayout<'_>| {
142             if let Some(pack) = pack {
143                 // return the packed alignment in bytes
144                 f.align.abi.min(pack).bytes()
145             } else {
146                 // returns log2(effective-align).
147                 // This is ok since `pack` applies to all fields equally.
148                 // The calculation assumes that size is an integer multiple of align, except for ZSTs.
149                 //
150                 // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
151                 f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
152             }
153         };
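        // Illustrative example of the log2 grouping above: a `[u8; 6]` field has align 1
        // but size 6, so `max(1, 6).trailing_zeros() == 1` groups it with align-2 fields,
        // while `[u8; 4]` gives `trailing_zeros() == 2` and groups with align-4 fields.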
154
155         // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
156         // the field ordering to try to catch code that makes assumptions about layouts
157         // we don't guarantee.
158         if repr.can_randomize_type_layout() {
159             // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use
160             // to randomize the field ordering with.
161             let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
162
163             // Shuffle the ordering of the fields
164             optimizing.shuffle(&mut rng);
165
166             // Otherwise we just leave things alone and actually optimize the type's fields
167         } else {
168             match kind {
169                 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
170                     optimizing.sort_by_key(|&x| {
171                         // Place ZSTs first to avoid "interesting offsets",
172                         // especially with only one or two non-ZST fields.
173                         // Then place largest alignments first, largest niches within an alignment group last
174                         let f = &fields[x as usize];
175                         let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
176                         (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
177                     });
178                 }
179
180                 StructKind::Prefixed(..) => {
181                     // Sort in ascending alignment so that the layout stays optimal
182                     // regardless of the prefix.
183                     // And put the largest niche in an alignment group at the end
184                     // so it can be used as a discriminant in jagged enums.
185                     optimizing.sort_by_key(|&x| {
186                         let f = &fields[x as usize];
187                         let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
188                         (effective_field_align(f), niche_size)
189                     });
190                 }
191             }
192
193             // FIXME(Kixiron): We can always shuffle fields within a given alignment class
194             //                 regardless of the status of `-Z randomize-layout`
195         }
196     }
197
198     // inverse_memory_index holds field indices by increasing memory offset.
199     // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
200     // We now write field offsets to the corresponding offset slot;
201     // field 5 with offset 0 puts 0 in offsets[5].
202     // At the bottom of this function, we invert `inverse_memory_index` to
203     // produce `memory_index` (see `invert_mapping`).
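    // Concretely (an illustrative case): for `struct S(u8, u64, u16)` the sort above
    // typically places the `u64` first, then the `u16`, then the `u8`, giving
    // inverse_memory_index = [1, 2, 0] and, after inversion, memory_index = [2, 0, 1].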
204
205     let mut sized = true;
206     let mut offsets = vec![Size::ZERO; fields.len()];
207     let mut offset = Size::ZERO;
208     let mut largest_niche = None;
209     let mut largest_niche_available = 0;
210
211     if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
212         let prefix_align =
213             if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
214         align = align.max(AbiAndPrefAlign::new(prefix_align));
215         offset = prefix_size.align_to(prefix_align);
216     }
217
218     for &i in &inverse_memory_index {
219         let field = fields[i as usize];
220         if !sized {
221             cx.tcx.sess.delay_span_bug(
222                 DUMMY_SP,
223                 &format!(
224                     "univariant: field #{} of `{}` comes after unsized field",
225                     offsets.len(),
226                     ty
227                 ),
228             );
229         }
230
231         if field.is_unsized() {
232             sized = false;
233         }
234
235         // Invariant: offset < dl.obj_size_bound() <= 1<<61
236         let field_align = if let Some(pack) = pack {
237             field.align.min(AbiAndPrefAlign::new(pack))
238         } else {
239             field.align
240         };
241         offset = offset.align_to(field_align.abi);
242         align = align.max(field_align);
243
244         debug!("univariant offset: {:?} field: {:#?}", offset, field);
245         offsets[i as usize] = offset;
246
247         if let Some(mut niche) = field.largest_niche {
248             let available = niche.available(dl);
249             if available > largest_niche_available {
250                 largest_niche_available = available;
251                 niche.offset += offset;
252                 largest_niche = Some(niche);
253             }
254         }
255
256         offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
257     }
258
259     if let Some(repr_align) = repr.align {
260         align = align.max(AbiAndPrefAlign::new(repr_align));
261     }
262
263     debug!("univariant min_size: {:?}", offset);
264     let min_size = offset;
265
266     // As stated above, inverse_memory_index holds field indices by increasing offset.
267     // This makes it an already-sorted view of the offsets vec.
268     // To invert it, consider:
269     // if field 5 has offset 0, then inverse_memory_index[0] is 5,
270     // and memory_index[5] should therefore be 0.
271     // Note: if we didn't optimize, inverse_memory_index is already the identity and no inversion is needed.
272
273     let memory_index =
274         if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
275
276     let size = min_size.align_to(align.abi);
277     let mut abi = Abi::Aggregate { sized };
278
279     // Unpack newtype ABIs and find scalar pairs.
280     if sized && size.bytes() > 0 {
281         // All other fields must be ZSTs.
282         let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
283
284         match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
285             // We have exactly one non-ZST field.
286             (Some((i, field)), None, None) => {
287                 // Field fills the struct and it has a scalar or scalar pair ABI.
288                 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
289                     match field.abi {
290                         // For plain scalars, or vectors of them, we can't unpack
291                         // newtypes for `#[repr(C)]`, as that affects C ABIs.
292                         Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
293                             abi = field.abi;
294                         }
295                         // But scalar pairs are Rust-specific and get
296                         // treated as aggregates by C ABIs anyway.
297                         Abi::ScalarPair(..) => {
298                             abi = field.abi;
299                         }
300                         _ => {}
301                     }
302                 }
303             }
304
305             // Two non-ZST fields, and they're both scalars.
306             (Some((i, a)), Some((j, b)), None) => {
307                 match (a.abi, b.abi) {
308                     (Abi::Scalar(a), Abi::Scalar(b)) => {
309                         // Order by the memory placement, not source order.
310                         let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
311                             ((i, a), (j, b))
312                         } else {
313                             ((j, b), (i, a))
314                         };
315                         let pair = scalar_pair(cx, a, b);
316                         let pair_offsets = match pair.fields {
317                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
318                                 assert_eq!(memory_index, &[0, 1]);
319                                 offsets
320                             }
321                             _ => bug!(),
322                         };
323                         if offsets[i] == pair_offsets[0]
324                             && offsets[j] == pair_offsets[1]
325                             && align == pair.align
326                             && size == pair.size
327                         {
328                             // We can use `ScalarPair` only when it matches our
329                             // already computed layout (including `#[repr(C)]`).
330                             abi = pair.abi;
331                         }
332                     }
333                     _ => {}
334                 }
335             }
336
337             _ => {}
338         }
339     }
340
341     if fields.iter().any(|f| f.abi.is_uninhabited()) {
342         abi = Abi::Uninhabited;
343     }
344
345     Ok(LayoutS {
346         variants: Variants::Single { index: VariantIdx::new(0) },
347         fields: FieldsShape::Arbitrary { offsets, memory_index },
348         abi,
349         largest_niche,
350         align,
351         size,
352     })
353 }
354
355 fn layout_of_uncached<'tcx>(
356     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
357     ty: Ty<'tcx>,
358 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
359     let tcx = cx.tcx;
360     let param_env = cx.param_env;
361     let dl = cx.data_layout();
362     let scalar_unit = |value: Primitive| {
363         let size = value.size(dl);
364         assert!(size.bits() <= 128);
365         Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
366     };
367     let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));
368
369     let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
370         Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
371     };
372     debug_assert!(!ty.has_non_region_infer());
373
374     Ok(match *ty.kind() {
375         // Basic scalars.
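        // The 0..=1 valid range given to `bool` below is what provides its niche,
        // letting e.g. `Option<bool>` reuse a spare bit pattern instead of adding a tag.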
376         ty::Bool => tcx.intern_layout(LayoutS::scalar(
377             cx,
378             Scalar::Initialized {
379                 value: Int(I8, false),
380                 valid_range: WrappingRange { start: 0, end: 1 },
381             },
382         )),
383         ty::Char => tcx.intern_layout(LayoutS::scalar(
384             cx,
385             Scalar::Initialized {
386                 value: Int(I32, false),
387                 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
388             },
389         )),
390         ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
391         ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
392         ty::Float(fty) => scalar(match fty {
393             ty::FloatTy::F32 => F32,
394             ty::FloatTy::F64 => F64,
395         }),
396         ty::FnPtr(_) => {
397             let mut ptr = scalar_unit(Pointer);
398             ptr.valid_range_mut().start = 1;
399             tcx.intern_layout(LayoutS::scalar(cx, ptr))
400         }
401
402         // The never type.
403         ty::Never => tcx.intern_layout(LayoutS {
404             variants: Variants::Single { index: VariantIdx::new(0) },
405             fields: FieldsShape::Primitive,
406             abi: Abi::Uninhabited,
407             largest_niche: None,
408             align: dl.i8_align,
409             size: Size::ZERO,
410         }),
411
412         // Potentially-wide pointers.
413         ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
414             let mut data_ptr = scalar_unit(Pointer);
415             if !ty.is_unsafe_ptr() {
416                 data_ptr.valid_range_mut().start = 1;
417             }
418
419             let pointee = tcx.normalize_erasing_regions(param_env, pointee);
420             if pointee.is_sized(tcx, param_env) {
421                 return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
422             }
423
424             let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
425             let metadata = match unsized_part.kind() {
426                 ty::Foreign(..) => {
427                     return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
428                 }
429                 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
430                 ty::Dynamic(..) => {
431                     let mut vtable = scalar_unit(Pointer);
432                     vtable.valid_range_mut().start = 1;
433                     vtable
434                 }
435                 _ => return Err(LayoutError::Unknown(unsized_part)),
436             };
437
438             // Effectively a (ptr, meta) tuple.
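            // For example, `&[u8]` becomes (data pointer, length) and `&dyn Trait`
            // becomes (data pointer, vtable pointer), both built via `scalar_pair`.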
439             tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
440         }
441
442         ty::Dynamic(_, _, ty::DynStar) => {
443             let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
444             data.valid_range_mut().start = 0;
445             let mut vtable = scalar_unit(Pointer);
446             vtable.valid_range_mut().start = 1;
447             tcx.intern_layout(scalar_pair(cx, data, vtable))
448         }
449
450         // Arrays and slices.
451         ty::Array(element, mut count) => {
452             if count.has_projections() {
453                 count = tcx.normalize_erasing_regions(param_env, count);
454                 if count.has_projections() {
455                     return Err(LayoutError::Unknown(ty));
456                 }
457             }
458
459             let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
460             let element = cx.layout_of(element)?;
461             let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
462
463             let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
464                 Abi::Uninhabited
465             } else {
466                 Abi::Aggregate { sized: true }
467             };
468
469             let largest_niche = if count != 0 { element.largest_niche } else { None };
470
471             tcx.intern_layout(LayoutS {
472                 variants: Variants::Single { index: VariantIdx::new(0) },
473                 fields: FieldsShape::Array { stride: element.size, count },
474                 abi,
475                 largest_niche,
476                 align: element.align,
477                 size,
478             })
479         }
480         ty::Slice(element) => {
481             let element = cx.layout_of(element)?;
482             tcx.intern_layout(LayoutS {
483                 variants: Variants::Single { index: VariantIdx::new(0) },
484                 fields: FieldsShape::Array { stride: element.size, count: 0 },
485                 abi: Abi::Aggregate { sized: false },
486                 largest_niche: None,
487                 align: element.align,
488                 size: Size::ZERO,
489             })
490         }
491         ty::Str => tcx.intern_layout(LayoutS {
492             variants: Variants::Single { index: VariantIdx::new(0) },
493             fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
494             abi: Abi::Aggregate { sized: false },
495             largest_niche: None,
496             align: dl.i8_align,
497             size: Size::ZERO,
498         }),
499
500         // Odd unit types.
501         ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
502         ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
503             let mut unit = univariant_uninterned(
504                 cx,
505                 ty,
506                 &[],
507                 &ReprOptions::default(),
508                 StructKind::AlwaysSized,
509             )?;
510             match unit.abi {
511                 Abi::Aggregate { ref mut sized } => *sized = false,
512                 _ => bug!(),
513             }
514             tcx.intern_layout(unit)
515         }
516
517         ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
518
519         ty::Closure(_, ref substs) => {
520             let tys = substs.as_closure().upvar_tys();
521             univariant(
522                 &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
523                 &ReprOptions::default(),
524                 StructKind::AlwaysSized,
525             )?
526         }
527
528         ty::Tuple(tys) => {
529             let kind =
530                 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
531
532             univariant(
533                 &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
534                 &ReprOptions::default(),
535                 kind,
536             )?
537         }
538
539         // SIMD vector types.
540         ty::Adt(def, substs) if def.repr().simd() => {
541             if !def.is_struct() {
542                 // Should have yielded E0517 by now.
543                 tcx.sess.delay_span_bug(
544                     DUMMY_SP,
545                     "#[repr(simd)] was applied to an ADT that is not a struct",
546                 );
547                 return Err(LayoutError::Unknown(ty));
548             }
549
550             // Supported SIMD vectors are homogeneous ADTs with at least one field:
551             //
552             // * #[repr(simd)] struct S(T, T, T, T);
553             // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
554             // * #[repr(simd)] struct S([T; 4])
555             //
556             // where T is a primitive scalar (integer/float/pointer).
557
558             // SIMD vectors with zero fields are not supported.
559             // (should be caught by typeck)
560             if def.non_enum_variant().fields.is_empty() {
561                 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
562             }
563
564             // Type of the first ADT field:
565             let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
566
567             // Heterogeneous SIMD vectors are not supported:
568             // (should be caught by typeck)
569             for fi in &def.non_enum_variant().fields {
570                 if fi.ty(tcx, substs) != f0_ty {
571                     tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
572                 }
573             }
574
575             // The element type and number of elements of the SIMD vector
576             // are obtained from:
577             //
578             // * the element type and length of the single array field, if
579             // the first field is of array type, or
580             //
581             // * the homogeneous field type and the number of fields.
582             let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
583                 // First ADT field is an array:
584
585                 // SIMD vectors with multiple array fields are not supported:
586                 // (should be caught by typeck)
587                 if def.non_enum_variant().fields.len() != 1 {
588                     tcx.sess.fatal(&format!(
589                         "monomorphising SIMD type `{}` with more than one array field",
590                         ty
591                     ));
592                 }
593
594                 // Extract the number of elements from the layout of the array field:
595                 let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
596                         return Err(LayoutError::Unknown(ty));
597                     };
598
599                 (*e_ty, *count, true)
600             } else {
601                 // First ADT field is not an array:
602                 (f0_ty, def.non_enum_variant().fields.len() as _, false)
603             };
604
605             // SIMD vectors of zero length are not supported.
606             // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
607             // support.
608             //
609             // Can't be caught in typeck if the array length is generic.
610             if e_len == 0 {
611                 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
612             } else if e_len > MAX_SIMD_LANES {
613                 tcx.sess.fatal(&format!(
614                     "monomorphising SIMD type `{}` of length greater than {}",
615                     ty, MAX_SIMD_LANES,
616                 ));
617             }
618
619             // Compute the ABI of the element type:
620             let e_ly = cx.layout_of(e_ty)?;
621             let Abi::Scalar(e_abi) = e_ly.abi else {
622                     // This error isn't caught in typeck, e.g., if
623                     // the element type of the vector is generic.
624                     tcx.sess.fatal(&format!(
625                         "monomorphising SIMD type `{}` with a non-primitive-scalar \
626                         (integer/float/pointer) element type `{}`",
627                         ty, e_ty
628                     ))
629                 };
630
631             // Compute the size and alignment of the vector:
632             let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
633             let align = dl.vector_align(size);
634             let size = size.align_to(align.abi);
635
636             // Compute the placement of the vector fields:
637             let fields = if is_array {
638                 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
639             } else {
640                 FieldsShape::Array { stride: e_ly.size, count: e_len }
641             };
642
643             tcx.intern_layout(LayoutS {
644                 variants: Variants::Single { index: VariantIdx::new(0) },
645                 fields,
646                 abi: Abi::Vector { element: e_abi, count: e_len },
647                 largest_niche: e_ly.largest_niche,
648                 size,
649                 align,
650             })
651         }
652
653         // ADTs.
654         ty::Adt(def, substs) => {
655             // Cache the field layouts.
656             let variants = def
657                 .variants()
658                 .iter()
659                 .map(|v| {
660                     v.fields
661                         .iter()
662                         .map(|field| cx.layout_of(field.ty(tcx, substs)))
663                         .collect::<Result<Vec<_>, _>>()
664                 })
665                 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
666
667             if def.is_union() {
668                 if def.repr().pack.is_some() && def.repr().align.is_some() {
669                     cx.tcx.sess.delay_span_bug(
670                         tcx.def_span(def.did()),
671                         "union cannot be packed and aligned",
672                     );
673                     return Err(LayoutError::Unknown(ty));
674                 }
675
676                 let mut align =
677                     if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
678
679                 if let Some(repr_align) = def.repr().align {
680                     align = align.max(AbiAndPrefAlign::new(repr_align));
681                 }
682
683                 let optimize = !def.repr().inhibit_union_abi_opt();
684                 let mut size = Size::ZERO;
685                 let mut abi = Abi::Aggregate { sized: true };
686                 let index = VariantIdx::new(0);
687                 for field in &variants[index] {
688                     assert!(field.is_sized());
689                     align = align.max(field.align);
690
691                     // If all non-ZST fields have the same ABI, forward this ABI
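                    // (e.g. a union of `u32` and `NonZeroU32` fields still gets `Abi::Scalar`,
                    // since `to_union()` below drops the valid-range/niche information before
                    // the per-field ABIs are compared.)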
692                     if optimize && !field.is_zst() {
693                         // Discard valid range information and allow undef
694                         let field_abi = match field.abi {
695                             Abi::Scalar(x) => Abi::Scalar(x.to_union()),
696                             Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
697                             Abi::Vector { element: x, count } => {
698                                 Abi::Vector { element: x.to_union(), count }
699                             }
700                             Abi::Uninhabited | Abi::Aggregate { .. } => {
701                                 Abi::Aggregate { sized: true }
702                             }
703                         };
704
705                         if size == Size::ZERO {
706                             // first non ZST: initialize 'abi'
707                             abi = field_abi;
708                         } else if abi != field_abi {
709                             // different fields have different ABI: reset to Aggregate
710                             abi = Abi::Aggregate { sized: true };
711                         }
712                     }
713
714                     size = cmp::max(size, field.size);
715                 }
716
717                 if let Some(pack) = def.repr().pack {
718                     align = align.min(AbiAndPrefAlign::new(pack));
719                 }
720
721                 return Ok(tcx.intern_layout(LayoutS {
722                     variants: Variants::Single { index },
723                     fields: FieldsShape::Union(
724                         NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
725                     ),
726                     abi,
727                     largest_niche: None,
728                     align,
729                     size: size.align_to(align.abi),
730                 }));
731             }
732
733             // A variant is absent if it's uninhabited and only has ZST fields.
734             // Present uninhabited variants only require space for their fields,
735             // but *not* an encoding of the discriminant (e.g., a tag value).
736             // See issue #49298 for more details on the need to leave space
737             // for non-ZST uninhabited data (mostly partial initialization).
738             let absent = |fields: &[TyAndLayout<'_>]| {
739                 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
740                 let is_zst = fields.iter().all(|f| f.is_zst());
741                 uninhabited && is_zst
742             };
743             let (present_first, present_second) = {
744                 let mut present_variants = variants
745                     .iter_enumerated()
746                     .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
747                 (present_variants.next(), present_variants.next())
748             };
749             let present_first = match present_first {
750                 Some(present_first) => present_first,
751                 // Uninhabited because it has no variants, or only absent ones.
752                 None if def.is_enum() => {
753                     return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
754                 }
755                 // If it's a struct, still compute a layout so that we can still compute the
756                 // field offsets.
757                 None => VariantIdx::new(0),
758             };
759
760             let is_struct = !def.is_enum() ||
761                     // Only one variant is present.
762                     (present_second.is_none() &&
763                         // Representation optimizations are allowed.
764                         !def.repr().inhibit_enum_layout_opt());
765             if is_struct {
766                 // Struct, or univariant enum equivalent to a struct.
767                 // (Typechecking will reject discriminant-sizing attrs.)
768
769                 let v = present_first;
770                 let kind = if def.is_enum() || variants[v].is_empty() {
771                     StructKind::AlwaysSized
772                 } else {
773                     let param_env = tcx.param_env(def.did());
774                     let last_field = def.variant(v).fields.last().unwrap();
775                     let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
776                     if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
777                 };
778
779                 let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
780                 st.variants = Variants::Single { index: v };
781
782                 if def.is_unsafe_cell() {
783                     let hide_niches = |scalar: &mut _| match scalar {
784                         Scalar::Initialized { value, valid_range } => {
785                             *valid_range = WrappingRange::full(value.size(dl))
786                         }
787                         // Already doesn't have any niches
788                         Scalar::Union { .. } => {}
789                     };
790                     match &mut st.abi {
791                         Abi::Uninhabited => {}
792                         Abi::Scalar(scalar) => hide_niches(scalar),
793                         Abi::ScalarPair(a, b) => {
794                             hide_niches(a);
795                             hide_niches(b);
796                         }
797                         Abi::Vector { element, count: _ } => hide_niches(element),
798                         Abi::Aggregate { sized: _ } => {}
799                     }
800                     st.largest_niche = None;
801                     return Ok(tcx.intern_layout(st));
802                 }
803
804                 let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
805                 match st.abi {
806                     Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
807                         // the asserts ensure that we are not using the
808                         // `#[rustc_layout_scalar_valid_range(n)]`
809                         // attribute to widen the range of anything as that would probably
810                         // result in UB somewhere
811                         // FIXME(eddyb) the asserts are probably not needed,
812                         // as larger validity ranges would result in missed
813                         // optimizations, *not* wrongly assuming the inner
814                         // value is valid. e.g. unions enlarge validity ranges,
815                         // because the values may be uninitialized.
816                         if let Bound::Included(start) = start {
817                             // FIXME(eddyb) this might be incorrect - it doesn't
818                             // account for wrap-around (end < start) ranges.
819                             let valid_range = scalar.valid_range_mut();
820                             assert!(valid_range.start <= start);
821                             valid_range.start = start;
822                         }
823                         if let Bound::Included(end) = end {
824                             // FIXME(eddyb) this might be incorrect - it doesn't
825                             // account for wrap-around (end < start) ranges.
826                             let valid_range = scalar.valid_range_mut();
827                             assert!(valid_range.end >= end);
828                             valid_range.end = end;
829                         }
830
831                         // Update `largest_niche` if we have introduced a larger niche.
832                         let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
833                         if let Some(niche) = niche {
834                             match st.largest_niche {
835                                 Some(largest_niche) => {
836                                     // Replace the existing niche even if they're equal,
837                                     // because this one is at a lower offset.
838                                     if largest_niche.available(dl) <= niche.available(dl) {
839                                         st.largest_niche = Some(niche);
840                                     }
841                                 }
842                                 None => st.largest_niche = Some(niche),
843                             }
844                         }
845                     }
846                     _ => assert!(
847                         start == Bound::Unbounded && end == Bound::Unbounded,
848                         "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
849                         def,
850                         st,
851                     ),
852                 }
853
854                 return Ok(tcx.intern_layout(st));
855             }
856
857             // At this point, we have handled all unions and
858             // structs. (We have also handled univariant enums
859             // that allow representation optimization.)
860             assert!(def.is_enum());
861
862             // Until we've decided whether to use the tagged or
863             // niche filling LayoutS, we don't want to intern the
864             // variant layouts, so we can't store them in the
865             // overall LayoutS. Store the overall LayoutS
866             // and the variant LayoutSs here until then.
867             struct TmpLayout<'tcx> {
868                 layout: LayoutS<'tcx>,
869                 variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
870             }
871
872             let calculate_niche_filling_layout =
873                 || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
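                    // Classic illustrative case: `Option<&T>` needs no separate tag; the
                    // all-zero bit pattern of the reference (its niche) encodes `None`, so
                    // the enum stays pointer-sized. This closure generalizes that idea to
                    // any enum whose largest variant has a field with enough spare values.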
874                     // The current code for niche-filling relies on variant indices
875                     // instead of actual discriminants, so enums with
876                     // explicit discriminants (RFC #2363) would misbehave.
877                     if def.repr().inhibit_enum_layout_opt()
878                         || def
879                             .variants()
880                             .iter_enumerated()
881                             .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
882                     {
883                         return Ok(None);
884                     }
885
886                     if variants.len() < 2 {
887                         return Ok(None);
888                     }
889
890                     let mut align = dl.aggregate_align;
891                     let mut variant_layouts = variants
892                         .iter_enumerated()
893                         .map(|(j, v)| {
894                             let mut st = univariant_uninterned(
895                                 cx,
896                                 ty,
897                                 v,
898                                 &def.repr(),
899                                 StructKind::AlwaysSized,
900                             )?;
901                             st.variants = Variants::Single { index: j };
902
903                             align = align.max(st.align);
904
905                             Ok(st)
906                         })
907                         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
908
909                     let largest_variant_index = match variant_layouts
910                         .iter_enumerated()
911                         .max_by_key(|(_i, layout)| layout.size.bytes())
912                         .map(|(i, _layout)| i)
913                     {
914                         None => return Ok(None),
915                         Some(i) => i,
916                     };
917
918                     let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
919                     let needs_disc = |index: VariantIdx| {
920                         index != largest_variant_index && !absent(&variants[index])
921                     };
922                     let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
923                         ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
924
925                     let count = niche_variants.size_hint().1.unwrap() as u128;
926
927                     // Find the field with the largest niche
928                     let (field_index, niche, (niche_start, niche_scalar)) = match variants
929                         [largest_variant_index]
930                         .iter()
931                         .enumerate()
932                         .filter_map(|(j, field)| Some((j, field.largest_niche?)))
933                         .max_by_key(|(_, niche)| niche.available(dl))
934                         .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
935                     {
936                         None => return Ok(None),
937                         Some(x) => x,
938                     };
939
940                     let niche_offset = niche.offset
941                         + variant_layouts[largest_variant_index].fields.offset(field_index);
942                     let niche_size = niche.value.size(dl);
943                     let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
944
945                     let all_variants_fit =
946                         variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
947                             if i == largest_variant_index {
948                                 return true;
949                             }
950
951                             layout.largest_niche = None;
952
953                             if layout.size <= niche_offset {
954                                 // This variant will fit before the niche.
955                                 return true;
956                             }
957
958                             // Determine if it'll fit after the niche.
959                             let this_align = layout.align.abi;
960                             let this_offset = (niche_offset + niche_size).align_to(this_align);
961
962                             if this_offset + layout.size > size {
963                                 return false;
964                             }
965
966                             // It'll fit, but we need to make some adjustments.
967                             match layout.fields {
968                                 FieldsShape::Arbitrary { ref mut offsets, .. } => {
969                                     for (j, offset) in offsets.iter_mut().enumerate() {
970                                         if !variants[i][j].is_zst() {
971                                             *offset += this_offset;
972                                         }
973                                     }
974                                 }
975                                 _ => {
976                                     panic!("Layout of fields should be Arbitrary for variants")
977                                 }
978                             }
979
980                             // It can't be a Scalar or ScalarPair because the offset isn't 0.
981                             if !layout.abi.is_uninhabited() {
982                                 layout.abi = Abi::Aggregate { sized: true };
983                             }
984                             layout.size += this_offset;
985
986                             true
987                         });
988
989                     if !all_variants_fit {
990                         return Ok(None);
991                     }
992
993                     let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
994
995                     let others_zst = variant_layouts
996                         .iter_enumerated()
997                         .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
998                     let same_size = size == variant_layouts[largest_variant_index].size;
999                     let same_align = align == variant_layouts[largest_variant_index].align;
1000
1001                     let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1002                         Abi::Uninhabited
1003                     } else if same_size && same_align && others_zst {
1004                         match variant_layouts[largest_variant_index].abi {
1005                             // When the total alignment and size match, we can use the
1006                             // same ABI as the scalar variant with the reserved niche.
1007                             Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1008                             Abi::ScalarPair(first, second) => {
1009                                 // Only the niche is guaranteed to be initialised,
1010                                 // so use union layouts for the other primitive.
1011                                 if niche_offset == Size::ZERO {
1012                                     Abi::ScalarPair(niche_scalar, second.to_union())
1013                                 } else {
1014                                     Abi::ScalarPair(first.to_union(), niche_scalar)
1015                                 }
1016                             }
1017                             _ => Abi::Aggregate { sized: true },
1018                         }
1019                     } else {
1020                         Abi::Aggregate { sized: true }
1021                     };
1022
1023                     let layout = LayoutS {
1024                         variants: Variants::Multiple {
1025                             tag: niche_scalar,
1026                             tag_encoding: TagEncoding::Niche {
1027                                 untagged_variant: largest_variant_index,
1028                                 niche_variants,
1029                                 niche_start,
1030                             },
1031                             tag_field: 0,
1032                             variants: IndexVec::new(),
1033                         },
1034                         fields: FieldsShape::Arbitrary {
1035                             offsets: vec![niche_offset],
1036                             memory_index: vec![0],
1037                         },
1038                         abi,
1039                         largest_niche,
1040                         size,
1041                         align,
1042                     };
1043
1044                     Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1045                 };
1046
1047             let niche_filling_layout = calculate_niche_filling_layout()?;
1048
1049             let (mut min, mut max) = (i128::MAX, i128::MIN);
1050             let discr_type = def.repr().discr_type();
1051             let bits = Integer::from_attr(cx, discr_type).size().bits();
1052             for (i, discr) in def.discriminants(tcx) {
1053                 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1054                     continue;
1055                 }
1056                 let mut x = discr.val as i128;
1057                 if discr_type.is_signed() {
1058                     // sign extend the raw representation to be an i128
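                    // e.g. (illustrative) an i8 discriminant of -1 arrives here as 0xFF with
                    // bits == 8; (0xFF << 120) >> 120 sign-extends it back to -1 as an i128.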
1059                     x = (x << (128 - bits)) >> (128 - bits);
1060                 }
1061                 if x < min {
1062                     min = x;
1063                 }
1064                 if x > max {
1065                     max = x;
1066                 }
1067             }
1068             // We might have no inhabited variants, so pretend there's at least one.
1069             if (min, max) == (i128::MAX, i128::MIN) {
1070                 min = 0;
1071                 max = 0;
1072             }
1073             assert!(min <= max, "discriminant range is {}...{}", min, max);
1074             let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1075
1076             let mut align = dl.aggregate_align;
1077             let mut size = Size::ZERO;
1078
1079             // We're interested in the smallest alignment, so start large.
1080             let mut start_align = Align::from_bytes(256).unwrap();
1081             assert_eq!(Integer::for_align(dl, start_align), None);
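            // (256 bytes exceeds the alignment of every primitive integer, so the assert
            // documents that `Integer::for_align` only yields a type once `start_align`
            // has been lowered to a real field alignment below.)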
1082
1083             // repr(C) on an enum tells us to make a (tag, union) layout,
1084             // so we need to grow the prefix alignment to be at least
1085             // the alignment of the union. (This value is used both for
1086             // determining the alignment of the overall enum, and for
1087             // determining the alignment of the payload after the tag.)
1088             let mut prefix_align = min_ity.align(dl).abi;
1089             if def.repr().c() {
1090                 for fields in &variants {
1091                     for field in fields {
1092                         prefix_align = prefix_align.max(field.align.abi);
1093                     }
1094                 }
1095             }
1096
1097             // Create the set of structs that represent each variant.
1098             let mut layout_variants = variants
1099                 .iter_enumerated()
1100                 .map(|(i, field_layouts)| {
1101                     let mut st = univariant_uninterned(
1102                         cx,
1103                         ty,
1104                         &field_layouts,
1105                         &def.repr(),
1106                         StructKind::Prefixed(min_ity.size(), prefix_align),
1107                     )?;
1108                     st.variants = Variants::Single { index: i };
1109                     // Find the first field we can't move later
1110                     // to make room for a larger discriminant.
1111                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
1112                         if !field.is_zst() || field.align.abi.bytes() != 1 {
1113                             start_align = start_align.min(field.align.abi);
1114                             break;
1115                         }
1116                     }
1117                     size = cmp::max(size, st.size);
1118                     align = align.max(st.align);
1119                     Ok(st)
1120                 })
1121                 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1122
1123             // Align the maximum variant size to the largest alignment.
1124             size = size.align_to(align.abi);
1125
1126             if size.bytes() >= dl.obj_size_bound() {
1127                 return Err(LayoutError::SizeOverflow(ty));
1128             }
1129
1130             let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1131             if typeck_ity < min_ity {
1132                 // It is a bug if layout decided on a greater discriminant size than typeck did
1133                 // at this point (based on the values the discriminant can take on), mostly
1134                 // because this discriminant will be loaded and then stored into a variable of
1135                 // the type computed by typeck. Consider such a case (a bug): typeck decided on
1136                 // a byte-sized discriminant, but layout thinks we need 16 bits to store all
1137                 // discriminant values. Then, in codegen, in order to store this 16-bit
1138                 // discriminant into an 8-bit sized temporary, some of the bits needed to
1139                 // represent it would have to be discarded (or layout is wrong in thinking
1140                 // it needs 16 bits).
1141                 bug!(
1142                     "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1143                     min_ity,
1144                     typeck_ity
1145                 );
1146                 // However, it is fine to make discr type however large (as an optimisation)
1147                 // after this point â€“ we’ll just truncate the value we load in codegen.
1148             }
1149
1150             // Check to see if we should use a different type for the
1151             // discriminant. We can safely use a type with the same size
1152             // as the alignment of the first field of each variant.
1153             // We increase the size of the discriminant to avoid LLVM copying
1154             // padding when it doesn't need to. This normally causes unaligned
1155             // load/stores and excessive memcpy/memset operations. By using a
1156             // bigger integer size, LLVM can be sure about its contents and
1157             // won't be so conservative.
1158
1159             // Use the initial field alignment
1160             let mut ity = if def.repr().c() || def.repr().int.is_some() {
1161                 min_ity
1162             } else {
1163                 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1164             };
1165
1166             // If the alignment is not larger than the chosen discriminant size,
1167             // don't use the alignment as the final size.
1168             if ity <= min_ity {
1169                 ity = min_ity;
1170             } else {
1171                 // Patch up the variants' first few fields.
1172                 let old_ity_size = min_ity.size();
1173                 let new_ity_size = ity.size();
1174                 for variant in &mut layout_variants {
1175                     match variant.fields {
1176                         FieldsShape::Arbitrary { ref mut offsets, .. } => {
1177                             for i in offsets {
1178                                 if *i <= old_ity_size {
1179                                     assert_eq!(*i, old_ity_size);
1180                                     *i = new_ity_size;
1181                                 }
1182                             }
1183                             // We might be making the struct larger.
1184                             if variant.size <= old_ity_size {
1185                                 variant.size = new_ity_size;
1186                             }
1187                         }
1188                         _ => bug!(),
1189                     }
1190                 }
1191             }
1192
1193             let tag_mask = ity.size().unsigned_int_max();
1194             let tag = Scalar::Initialized {
1195                 value: Int(ity, signed),
1196                 valid_range: WrappingRange {
1197                     start: (min as u128 & tag_mask),
1198                     end: (max as u128 & tag_mask),
1199                 },
1200             };
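            // Note (illustrative): for a signed discriminant this range may wrap around,
            // e.g. an i8 range of -1..=1 masks to start == 0xFF, end == 0x01, which
            // `WrappingRange` is defined to permit.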
1201             let mut abi = Abi::Aggregate { sized: true };
1202
1203             if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1204                 abi = Abi::Uninhabited;
1205             } else if tag.size(dl) == size {
1206                 // Make sure we only use scalar layout when the enum is entirely its
1207                 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1208                 abi = Abi::Scalar(tag);
1209             } else {
1210                 // Try to use a ScalarPair for all tagged enums.
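                      // For example, `enum E { A(u32), B(u32) }` can get a
                      // `ScalarPair(tag, u32)` ABI, because every variant has at most one
                      // non-ZST field and those fields share the same primitive and offset.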
1211                 let mut common_prim = None;
1212                 let mut common_prim_initialized_in_all_variants = true;
1213                 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1214                     let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1215                         bug!();
1216                     };
1217                     let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1218                     let (field, offset) = match (fields.next(), fields.next()) {
1219                         (None, None) => {
1220                             common_prim_initialized_in_all_variants = false;
1221                             continue;
1222                         }
1223                         (Some(pair), None) => pair,
1224                         _ => {
1225                             common_prim = None;
1226                             break;
1227                         }
1228                     };
1229                     let prim = match field.abi {
1230                         Abi::Scalar(scalar) => {
1231                             common_prim_initialized_in_all_variants &=
1232                                 matches!(scalar, Scalar::Initialized { .. });
1233                             scalar.primitive()
1234                         }
1235                         _ => {
1236                             common_prim = None;
1237                             break;
1238                         }
1239                     };
1240                     if let Some(pair) = common_prim {
1241                         // This is pretty conservative. We could go fancier
1242                         // by conflating things like i32 and u32, or even
1243                         // realising that (u8, u8) could just cohabit with
1244                         // u16 or even u32.
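                              // For instance, `enum E { A(u32), B(f32) }` currently falls
                              // back to `Abi::Aggregate` here because `u32` and `f32` are
                              // different primitives, even though they have the same size.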
1245                         if pair != (prim, offset) {
1246                             common_prim = None;
1247                             break;
1248                         }
1249                     } else {
1250                         common_prim = Some((prim, offset));
1251                     }
1252                 }
1253                 if let Some((prim, offset)) = common_prim {
1254                     let prim_scalar = if common_prim_initialized_in_all_variants {
1255                         scalar_unit(prim)
1256                     } else {
1257                         // Common prim might be uninit.
1258                         Scalar::Union { value: prim }
1259                     };
1260                     let pair = scalar_pair(cx, tag, prim_scalar);
1261                     let pair_offsets = match pair.fields {
1262                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1263                             assert_eq!(memory_index, &[0, 1]);
1264                             offsets
1265                         }
1266                         _ => bug!(),
1267                     };
1268                     if pair_offsets[0] == Size::ZERO
1269                         && pair_offsets[1] == *offset
1270                         && align == pair.align
1271                         && size == pair.size
1272                     {
1273                         // We can use `ScalarPair` only when it matches our
1274                         // already computed layout (including `#[repr(C)]`).
1275                         abi = pair.abi;
1276                     }
1277                 }
1278             }
1279
1280             // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1281             // variants to ensure they are consistent. This is because a downcast is
1282             // semantically a NOP, and thus should not affect layout.
1283             if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1284                 for variant in &mut layout_variants {
1285                     // We only do this for variants with fields; the others are not accessed anyway.
1286                     // Also do not overwrite any already existing "clever" ABIs.
1287                     if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
1288                         variant.abi = abi;
1289                         // Also need to bump up the size and alignment, so that the entire value fits in here.
1290                         variant.size = cmp::max(variant.size, size);
1291                         variant.align.abi = cmp::max(variant.align.abi, align.abi);
1292                     }
1293                 }
1294             }
1295
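                  // Only the tag contributes a niche to the tagged layout; an enclosing
                  // type can then reuse its unused values, e.g. `Option<std::cmp::Ordering>`
                  // fits in a single byte.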
1296             let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1297
1298             let tagged_layout = LayoutS {
1299                 variants: Variants::Multiple {
1300                     tag,
1301                     tag_encoding: TagEncoding::Direct,
1302                     tag_field: 0,
1303                     variants: IndexVec::new(),
1304                 },
1305                 fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
1306                 largest_niche,
1307                 abi,
1308                 align,
1309                 size,
1310             };
1311
1312             let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1313
1314             let mut best_layout = match (tagged_layout, niche_filling_layout) {
1315                 (tl, Some(nl)) => {
1316                     // Pick the smaller layout; otherwise,
1317                     // pick the layout with the larger niche; otherwise,
1318                     // pick tagged as it has simpler codegen.
1319                     use Ordering::*;
1320                     let niche_size = |tmp_l: &TmpLayout<'_>| {
1321                         tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1322                     };
1323                     match (
1324                         tl.layout.size.cmp(&nl.layout.size),
1325                         niche_size(&tl).cmp(&niche_size(&nl)),
1326                     ) {
1327                         (Greater, _) => nl,
1328                         (Equal, Less) => nl,
1329                         _ => tl,
1330                     }
1331                 }
1332                 (tl, None) => tl,
1333             };
1334
1335             // Now we can intern the variant layouts and store them in the enum layout.
1336             best_layout.layout.variants = match best_layout.layout.variants {
1337                 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1338                     tag,
1339                     tag_encoding,
1340                     tag_field,
1341                     variants: best_layout
1342                         .variants
1343                         .into_iter()
1344                         .map(|layout| tcx.intern_layout(layout))
1345                         .collect(),
1346                 },
1347                 _ => bug!(),
1348             };
1349
1350             tcx.intern_layout(best_layout.layout)
1351         }
1352
1353         // Types with no meaningful known layout.
1354         ty::Projection(_) | ty::Opaque(..) => {
1355             // NOTE(eddyb) `layout_of` query should've normalized these away,
1356             // if that was possible, so there's no reason to try again here.
1357             return Err(LayoutError::Unknown(ty));
1358         }
1359
1360         ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1361             bug!("Layout::compute: unexpected type `{}`", ty)
1362         }
1363
1364         ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1365             return Err(LayoutError::Unknown(ty));
1366         }
1367     })
1368 }
1369
1370 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1371 #[derive(Clone, Debug, PartialEq)]
1372 enum SavedLocalEligibility {
1373     Unassigned,
1374     Assigned(VariantIdx),
1375     // FIXME: Use newtype_index so we aren't wasting bytes
1376     Ineligible(Option<u32>),
1377 }
1378
1379 // When laying out generators, we divide our saved local fields into two
1380 // categories: overlap-eligible and overlap-ineligible.
1381 //
1382 // Those fields which are ineligible for overlap go in a "prefix" at the
1383 // beginning of the layout, and always have space reserved for them.
1384 //
1385 // Overlap-eligible fields are only assigned to one variant, so we lay
1386 // those fields out for each variant and put them right after the
1387 // prefix.
1388 //
1389 // Finally, in the layout details, we point to the fields from the
1390 // variants they are assigned to. It is possible for some fields to be
1391 // included in multiple variants. No field ever "moves around" in the
1392 // layout; its offset is always the same.
1393 //
1394 // Also included in the layout are the upvars and the discriminant.
1395 // These are included as fields on the "outer" layout; they are not part
1396 // of any variant.
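     //
     // For example (an illustrative sketch; generator syntax is unstable):
     //
     //     let gen = || {
     //         let s = String::from("prefix"); // live across both yields: ineligible
     //         {
     //             let a = String::from("a");  // live across the first yield only
     //             yield;
     //             drop(a);
     //         }
     //         {
     //             let b = String::from("b");  // live across the second yield only
     //             yield;
     //             drop(b);
     //         }
     //         drop(s);
     //     };
     //
     // Here `s` is saved at more than one suspension point, so it is promoted to
     // the prefix, while `a` and `b` are each assigned to a single variant and are
     // never storage-live at the same time, so they may share the same offset.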
1397
1398 /// Compute the eligibility and assignment of each local.
1399 fn generator_saved_local_eligibility<'tcx>(
1400     info: &GeneratorLayout<'tcx>,
1401 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1402     use SavedLocalEligibility::*;
1403
1404     let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1405         IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1406
1407     // The saved locals not eligible for overlap. These will get
1408     // "promoted" to the prefix of our generator.
1409     let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1410
1411     // Figure out which of our saved locals are fields in only
1412     // one variant. The rest are deemed ineligible for overlap.
1413     for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1414         for local in fields {
1415             match assignments[*local] {
1416                 Unassigned => {
1417                     assignments[*local] = Assigned(variant_index);
1418                 }
1419                 Assigned(idx) => {
1420                     // We've already seen this local at another suspension
1421                     // point, so it is no longer a candidate.
1422                     trace!(
1423                         "removing local {:?} in >1 variant ({:?}, {:?})",
1424                         local,
1425                         variant_index,
1426                         idx
1427                     );
1428                     ineligible_locals.insert(*local);
1429                     assignments[*local] = Ineligible(None);
1430                 }
1431                 Ineligible(_) => {}
1432             }
1433         }
1434     }
1435
1436     // Next, check every pair of eligible locals to see if they
1437     // conflict.
1438     for local_a in info.storage_conflicts.rows() {
1439         let conflicts_a = info.storage_conflicts.count(local_a);
1440         if ineligible_locals.contains(local_a) {
1441             continue;
1442         }
1443
1444         for local_b in info.storage_conflicts.iter(local_a) {
1445             // local_a and local_b are storage-live at the same time, so they
1446             // cannot overlap in the generator layout. The only way to guarantee
1447             // this is if they are in the same variant, or one is ineligible
1448             // (which means it is stored in every variant).
1449             if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
1450                 continue;
1451             }
1452
1453             // If they conflict, we will choose one to make ineligible.
1454             // This is not always optimal; it's just a greedy heuristic that
1455             // seems to produce good results most of the time.
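                  // (We remove the local that participates in more conflicts, since making
                  // it ineligible resolves more conflicting pairs in one step.)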
1456             let conflicts_b = info.storage_conflicts.count(local_b);
1457             let (remove, other) =
1458                 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1459             ineligible_locals.insert(remove);
1460             assignments[remove] = Ineligible(None);
1461             trace!("removing local {:?} due to conflict with {:?}", remove, other);
1462         }
1463     }
1464
1465     // Count the number of variants in use. If only one is in use, then it is
1466     // impossible to overlap any locals in our layout. In this case it's
1467     // always better to make the remaining locals ineligible, so we can
1468     // lay them out with the other locals in the prefix and eliminate
1469     // unnecessary padding bytes.
1470     {
1471         let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1472         for assignment in &assignments {
1473             if let Assigned(idx) = assignment {
1474                 used_variants.insert(*idx);
1475             }
1476         }
1477         if used_variants.count() < 2 {
1478             for assignment in assignments.iter_mut() {
1479                 *assignment = Ineligible(None);
1480             }
1481             ineligible_locals.insert_all();
1482         }
1483     }
1484
1485     // Write down the order of our locals that will be promoted to the prefix.
1486     {
1487         for (idx, local) in ineligible_locals.iter().enumerate() {
1488             assignments[local] = Ineligible(Some(idx as u32));
1489         }
1490     }
1491     debug!("generator saved local assignments: {:?}", assignments);
1492
1493     (ineligible_locals, assignments)
1494 }
1495
1496 /// Compute the full generator layout.
1497 fn generator_layout<'tcx>(
1498     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
1499     ty: Ty<'tcx>,
1500     def_id: hir::def_id::DefId,
1501     substs: SubstsRef<'tcx>,
1502 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1503     use SavedLocalEligibility::*;
1504     let tcx = cx.tcx;
1505     let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1506
1507     let Some(info) = tcx.generator_layout(def_id) else {
1508         return Err(LayoutError::Unknown(ty));
1509     };
1510     let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
1511
1512     // Build a prefix layout, including "promoting" all ineligible
1513     // locals as part of the prefix. We compute the layout of all of
1514     // these fields at once to get optimal packing.
1515     let tag_index = substs.as_generator().prefix_tys().count();
1516
1517     // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
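          // E.g. a generator with two suspend points has five variants in total
          // (Unresumed, Returned, Panicked, plus one per suspend point), so
          // `max_discr` is 4 and the tag fits in a `u8`.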
1518     let max_discr = (info.variant_fields.len() - 1) as u128;
1519     let discr_int = Integer::fit_unsigned(max_discr);
1520     let discr_int_ty = discr_int.to_ty(tcx, false);
1521     let tag = Scalar::Initialized {
1522         value: Primitive::Int(discr_int, false),
1523         valid_range: WrappingRange { start: 0, end: max_discr },
1524     };
1525     let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
1526     let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1527
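          // The promoted (ineligible) locals are wrapped in `MaybeUninit` because they
          // live in the prefix of every state, including states in which they are not
          // (or no longer) initialized.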
1528     let promoted_layouts = ineligible_locals
1529         .iter()
1530         .map(|local| subst_field(info.field_tys[local]))
1531         .map(|ty| tcx.mk_maybe_uninit(ty))
1532         .map(|ty| cx.layout_of(ty));
1533     let prefix_layouts = substs
1534         .as_generator()
1535         .prefix_tys()
1536         .map(|ty| cx.layout_of(ty))
1537         .chain(iter::once(Ok(tag_layout)))
1538         .chain(promoted_layouts)
1539         .collect::<Result<Vec<_>, _>>()?;
1540     let prefix = univariant_uninterned(
1541         cx,
1542         ty,
1543         &prefix_layouts,
1544         &ReprOptions::default(),
1545         StructKind::AlwaysSized,
1546     )?;
1547
1548     let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1549
1550     // Split the prefix layout into the "outer" fields (upvars and
1551     // discriminant) and the "promoted" fields. Promoted fields will
1552     // get included in each variant that requested them in
1553     // GeneratorLayout.
1554     debug!("prefix = {:#?}", prefix);
1555     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1556         FieldsShape::Arbitrary { mut offsets, memory_index } => {
1557             let mut inverse_memory_index = invert_mapping(&memory_index);
1558
1559             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1560             // "outer" and "promoted" fields respectively.
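                  // E.g. with two upvars, `tag_index` is 2 and `b_start` is 3: fields 0
                  // and 1 (the upvars) and field 2 (the tag) stay in the outer layout,
                  // while everything from index 3 onwards is a promoted (prefix) local.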
1561             let b_start = (tag_index + 1) as u32;
1562             let offsets_b = offsets.split_off(b_start as usize);
1563             let offsets_a = offsets;
1564
1565             // Disentangle the "a" and "b" components of `inverse_memory_index`
1566             // by preserving the order but keeping only one disjoint "half" each.
1567             // FIXME(eddyb) build a better abstraction for permutations, if possible.
1568             let inverse_memory_index_b: Vec<_> =
1569                 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1570             inverse_memory_index.retain(|&i| i < b_start);
1571             let inverse_memory_index_a = inverse_memory_index;
1572
1573             // Since `inverse_memory_index_{a,b}` each only refer to their
1574             // respective fields, they can be safely inverted
1575             let memory_index_a = invert_mapping(&inverse_memory_index_a);
1576             let memory_index_b = invert_mapping(&inverse_memory_index_b);
1577
1578             let outer_fields =
1579                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1580             (outer_fields, offsets_b, memory_index_b)
1581         }
1582         _ => bug!(),
1583     };
1584
1585     let mut size = prefix.size;
1586     let mut align = prefix.align;
1587     let variants = info
1588         .variant_fields
1589         .iter_enumerated()
1590         .map(|(index, variant_fields)| {
1591             // Only include overlap-eligible fields when we compute our variant layout.
1592             let variant_only_tys = variant_fields
1593                 .iter()
1594                 .filter(|local| match assignments[**local] {
1595                     Unassigned => bug!(),
1596                     Assigned(v) if v == index => true,
1597                     Assigned(_) => bug!("assignment does not match variant"),
1598                     Ineligible(_) => false,
1599                 })
1600                 .map(|local| subst_field(info.field_tys[*local]));
1601
1602             let mut variant = univariant_uninterned(
1603                 cx,
1604                 ty,
1605                 &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1606                 &ReprOptions::default(),
1607                 StructKind::Prefixed(prefix_size, prefix_align.abi),
1608             )?;
1609             variant.variants = Variants::Single { index };
1610
1611             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1612                 bug!();
1613             };
1614
1615             // Now, stitch the promoted and variant-only fields back together in
1616             // the order they are mentioned by our GeneratorLayout.
1617             // Because we only use some subset (that can differ between variants)
1618             // of the promoted fields, we can't just pick those elements of the
1619             // `promoted_memory_index` (as we'd end up with gaps).
1620             // So instead, we build an "inverse memory_index", as if all of the
1621             // promoted fields were being used, but leave the elements not in the
1622             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1623             // obtain a valid (bijective) mapping.
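                  // E.g. if this variant uses only the second of two promoted fields plus
                  // one variant-only field, this might build `[INVALID, 0, 1]`, which after
                  // filtering and inverting yields the bijective `memory_index` `[0, 1]`.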
1624             const INVALID_FIELD_IDX: u32 = !0;
1625             let mut combined_inverse_memory_index =
1626                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1627             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1628             let combined_offsets = variant_fields
1629                 .iter()
1630                 .enumerate()
1631                 .map(|(i, local)| {
1632                     let (offset, memory_index) = match assignments[*local] {
1633                         Unassigned => bug!(),
1634                         Assigned(_) => {
1635                             let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
1636                             (offset, promoted_memory_index.len() as u32 + memory_index)
1637                         }
1638                         Ineligible(field_idx) => {
1639                             let field_idx = field_idx.unwrap() as usize;
1640                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1641                         }
1642                     };
1643                     combined_inverse_memory_index[memory_index as usize] = i as u32;
1644                     offset
1645                 })
1646                 .collect();
1647
1648             // Remove the unused slots and invert the mapping to obtain the
1649             // combined `memory_index` (also see previous comment).
1650             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1651             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1652
1653             variant.fields = FieldsShape::Arbitrary {
1654                 offsets: combined_offsets,
1655                 memory_index: combined_memory_index,
1656             };
1657
1658             size = size.max(variant.size);
1659             align = align.max(variant.align);
1660             Ok(tcx.intern_layout(variant))
1661         })
1662         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1663
1664     size = size.align_to(align.abi);
1665
1666     let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1667         Abi::Uninhabited
1668     } else {
1669         Abi::Aggregate { sized: true }
1670     };
1671
1672     let layout = tcx.intern_layout(LayoutS {
1673         variants: Variants::Multiple {
1674             tag,
1675             tag_encoding: TagEncoding::Direct,
1676             tag_field: tag_index,
1677             variants,
1678         },
1679         fields: outer_fields,
1680         abi,
1681         largest_niche: prefix.largest_niche,
1682         size,
1683         align,
1684     });
1685     debug!("generator layout ({:?}): {:#?}", ty, layout);
1686     Ok(layout)
1687 }
1688
1689 /// This is invoked by the `layout_of` query to record the final
1690 /// layout of each type.
1691 #[inline(always)]
1692 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
1693     // If we are running with `-Zprint-type-sizes`, maybe record layouts
1694     // for dumping later.
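          // (The dump is produced by nightly builds run with `-Zprint-type-sizes`,
          // e.g. `RUSTFLAGS="-Zprint-type-sizes" cargo +nightly build`.)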
1695     if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
1696         record_layout_for_printing_outlined(cx, layout)
1697     }
1698 }
1699
1700 fn record_layout_for_printing_outlined<'tcx>(
1701     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
1702     layout: TyAndLayout<'tcx>,
1703 ) {
1704     // Ignore non-monomorphic layouts and layouts computed in non-empty
1705     // parameter environments, as the user only wants to see what results
1706     // from the final codegen session.
1707     if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
1708         return;
1709     }
1710
1711     // (delay format until we actually need it)
1712     let record = |kind, packed, opt_discr_size, variants| {
1713         let type_desc = format!("{:?}", layout.ty);
1714         cx.tcx.sess.code_stats.record_type_size(
1715             kind,
1716             type_desc,
1717             layout.align.abi,
1718             layout.size,
1719             packed,
1720             opt_discr_size,
1721             variants,
1722         );
1723     };
1724
1725     let adt_def = match *layout.ty.kind() {
1726         ty::Adt(ref adt_def, _) => {
1727             debug!("print-type-size t: `{:?}` process adt", layout.ty);
1728             adt_def
1729         }
1730
1731         ty::Closure(..) => {
1732             debug!("print-type-size t: `{:?}` record closure", layout.ty);
1733             record(DataTypeKind::Closure, false, None, vec![]);
1734             return;
1735         }
1736
1737         _ => {
1738             debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1739             return;
1740         }
1741     };
1742
1743     let adt_kind = adt_def.adt_kind();
1744     let adt_packed = adt_def.repr().pack.is_some();
1745
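          // Collect per-field name/offset/size/align info for one variant, and track
          // the smallest size that still covers every field.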
1746     let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1747         let mut min_size = Size::ZERO;
1748         let field_info: Vec<_> = flds
1749             .iter()
1750             .enumerate()
1751             .map(|(i, &name)| {
1752                 let field_layout = layout.field(cx, i);
1753                 let offset = layout.fields.offset(i);
1754                 let field_end = offset + field_layout.size;
1755                 if min_size < field_end {
1756                     min_size = field_end;
1757                 }
1758                 FieldInfo {
1759                     name,
1760                     offset: offset.bytes(),
1761                     size: field_layout.size.bytes(),
1762                     align: field_layout.align.abi.bytes(),
1763                 }
1764             })
1765             .collect();
1766
1767         VariantInfo {
1768             name: n,
1769             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1770             align: layout.align.abi.bytes(),
1771             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1772             fields: field_info,
1773         }
1774     };
1775
1776     match layout.variants {
1777         Variants::Single { index } => {
1778             if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1779                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
1780                 let variant_def = &adt_def.variant(index);
1781                 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1782                 record(
1783                     adt_kind.into(),
1784                     adt_packed,
1785                     None,
1786                     vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1787                 );
1788             } else {
1789                 // (This case arises for *empty* enums; so give it
1790                 // zero variants.)
1791                 record(adt_kind.into(), adt_packed, None, vec![]);
1792             }
1793         }
1794
1795         Variants::Multiple { tag, ref tag_encoding, .. } => {
1796             debug!(
1797                 "print-type-size `{:#?}` adt general variants def {}",
1798                 layout.ty,
1799                 adt_def.variants().len()
1800             );
1801             let variant_infos: Vec<_> = adt_def
1802                 .variants()
1803                 .iter_enumerated()
1804                 .map(|(i, variant_def)| {
1805                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1806                     build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
1807                 })
1808                 .collect();
1809             record(
1810                 adt_kind.into(),
1811                 adt_packed,
1812                 match tag_encoding {
1813                     TagEncoding::Direct => Some(tag.size(cx)),
1814                     _ => None,
1815                 },
1816                 variant_infos,
1817             );
1818         }
1819     }
1820 }