// compiler/rustc_ty_utils/src/layout.rs
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::ty::layout::{
    IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
    self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;

use std::cmp::{self, Ordering};
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

use crate::layout_sanity_check::sanity_check_layout;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    record_layout_for_printing(&cx, layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
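// For example, `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`: source field 0 sits
// at memory position 2, so memory position 2 maps back to source index 0.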
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

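// A worked example (for illustration, assuming the usual 64-bit data layout): pairing
// a pointer `a` (size 8, align 8) with an `i32` `b` (size 4, align 4) gives
// `b_offset = 8` and a total size of 12, rounded up to 16 by the pair's alignment of 8.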
fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
    let dl = cx.data_layout();
    let b_align = b.align(dl);
    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
    let b_offset = a.size(dl).align_to(b_align.abi);
    let size = (b_offset + b.size(dl)).align_to(align.abi);

    // HACK(nox): We iterate over `b` and then `a` because `max_by_key`
    // returns the last maximum.
    let largest_niche = Niche::from_scalar(dl, b_offset, b)
        .into_iter()
        .chain(Niche::from_scalar(dl, Size::ZERO, a))
        .max_by_key(|niche| niche.available(dl));

    LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: vec![Size::ZERO, b_offset],
            memory_index: vec![0, 1],
        },
        abi: Abi::ScalarPair(a, b),
        largest_niche,
        align,
        size,
    }
}

fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }

    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index[..end];
        let field_align = |f: &TyAndLayout<'_>| {
            if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
        };

        // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
        // the field ordering to try to catch code that makes assumptions about layouts
        // we don't guarantee.
        if repr.can_randomize_type_layout() {
            // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to
            // randomize the field ordering.
            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

            // Shuffle the ordering of the fields
            optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
        } else {
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }

                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            //                 regardless of the status of `-Z randomize-layout`
        }
    }
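    // E.g., with reordering enabled, a struct `S(u8, u32, u8)` is laid out as
    // (u32, u8, u8) in memory, shrinking it from 12 bytes to 8.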

    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_mapping`).

    let mut sized = true;
    let mut offsets = vec![Size::ZERO; fields.len()];
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;

    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }

    for &i in &inverse_memory_index {
        let field = fields[i as usize];
        if !sized {
            cx.tcx.sess.delay_span_bug(
                DUMMY_SP,
                &format!(
                    "univariant: field #{} of `{}` comes after unsized field",
                    i,
                    ty
                ),
            );
        }

        if field.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align.min(AbiAndPrefAlign::new(pack))
        } else {
            field.align
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i as usize] = offset;

        if let Some(mut niche) = field.largest_niche {
            let available = niche.available(dl);
            if available > largest_niche_available {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
    }

    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }

    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;

    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // if field 5 has offset 0, then offsets[0] is 5, and memory_index[5] should be 0.
    // Field 5 is the first field in memory order, so its memory_index is 0.
    // Note: if we didn't optimize, inverse_memory_index is the identity permutation
    // and already equals memory_index.

    let memory_index =
        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

    let size = min_size.align_to(align.abi);
    let mut abi = Abi::Aggregate { sized };

    // Unpack newtype ABIs and find scalar pairs.
    if sized && size.bytes() > 0 {
        // All other fields must be ZSTs.
        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
                    match field.abi {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi;
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi;
                        }
                        _ => {}
                    }
                }
            }

            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi, b.abi) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = scalar_pair(cx, a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }

    if fields.iter().any(|f| f.abi.is_uninhabited()) {
        abi = Abi::Uninhabited;
    }

    Ok(LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
    })
}

fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
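        // The restricted valid ranges above give `bool` and `char` niches: e.g. the
        // 254 unused bit patterns of `bool` let `Option<bool>` stay a single byte.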
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            let mut ptr = scalar_unit(Pointer);
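            // Function pointers are never null; recording that here gives them a
            // niche, so e.g. `Option<fn()>` stays pointer-sized.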
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            if !ty.is_unsafe_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
            let metadata = match unsized_part.kind() {
                ty::Foreign(..) => {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }
                ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                ty::Dynamic(..) => {
                    let mut vtable = scalar_unit(Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => return Err(LayoutError::Unknown(unsized_part)),
            };

            // Effectively a (ptr, meta) tuple.
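            // E.g., on a 64-bit target, `&[u8]` pairs a non-null data pointer with a
            // `usize` length: two 8-byte scalars, 16 bytes in total.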
            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(scalar_pair(cx, data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            let abi = if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty))
            {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            let largest_niche = if count != 0 { element.largest_niche } else { None };
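            // E.g., `[&u8; 4]` keeps the element's non-null-pointer niche, while a
            // zero-length array has no niche at all.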

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                let mut align =
                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                if let Some(repr_align) = def.repr().align {
                    align = align.max(AbiAndPrefAlign::new(repr_align));
                }

                let optimize = !def.repr().inhibit_union_abi_opt();
                let mut size = Size::ZERO;
                let mut abi = Abi::Aggregate { sized: true };
                let index = VariantIdx::new(0);
                for field in &variants[index] {
                    assert!(!field.is_unsized());
                    align = align.max(field.align);

                    // If all non-ZST fields have the same ABI, forward this ABI
                    if optimize && !field.is_zst() {
                        // Discard valid range information and allow undef
                        let field_abi = match field.abi {
                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                            Abi::Vector { element: x, count } => {
                                Abi::Vector { element: x.to_union(), count }
                            }
                            Abi::Uninhabited | Abi::Aggregate { .. } => {
                                Abi::Aggregate { sized: true }
                            }
                        };

                        if size == Size::ZERO {
                            // first non ZST: initialize 'abi'
                            abi = field_abi;
                        } else if abi != field_abi {
                            // different fields have different ABI: reset to Aggregate
                            abi = Abi::Aggregate { sized: true };
                        }
                    }

                    size = cmp::max(size, field.size);
                }
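                // E.g., `union U { a: u32, b: u32 }` forwards a scalar ABI (with the
                // valid range widened to all of u32), while mixing `u32` and `f32`
                // fields resets the ABI to `Aggregate`.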

                if let Some(pack) = def.repr().pack {
                    align = align.min(AbiAndPrefAlign::new(pack));
                }

                return Ok(tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index },
                    fields: FieldsShape::Union(
                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
                    ),
                    abi,
                    largest_niche: None,
                    align,
                    size: size.align_to(align.abi),
                }));
            }

            // A variant is absent if it's uninhabited and only has ZST fields.
            // Present uninhabited variants only require space for their fields,
            // but *not* an encoding of the discriminant (e.g., a tag value).
            // See issue #49298 for more details on the need to leave space
            // for non-ZST uninhabited data (mostly partial initialization).
            let absent = |fields: &[TyAndLayout<'_>]| {
                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                let is_zst = fields.iter().all(|f| f.is_zst());
                uninhabited && is_zst
            };
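            // E.g., in `enum E { A(!, u8), B }`, variant A is uninhabited but its `u8`
            // field is not a ZST, so A is not absent and still has space reserved for it.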
            let (present_first, present_second) = {
                let mut present_variants = variants
                    .iter_enumerated()
                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                (present_variants.next(), present_variants.next())
            };
            let present_first = match present_first {
                Some(present_first) => present_first,
                // Uninhabited because it has no variants, or only absent ones.
                None if def.is_enum() => {
                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                }
                // If it's a struct, still compute a layout so that we can still compute the
                // field offsets.
                None => VariantIdx::new(0),
            };

            let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                        // Representation optimizations are allowed.
                        !def.repr().inhibit_enum_layout_opt());
            if is_struct {
                // Struct, or univariant enum equivalent to a struct.
                // (Typechecking will reject discriminant-sizing attrs.)

                let v = present_first;
                let kind = if def.is_enum() || variants[v].is_empty() {
                    StructKind::AlwaysSized
                } else {
                    let param_env = tcx.param_env(def.did());
                    let last_field = def.variant(v).fields.last().unwrap();
                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
                };

                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
                st.variants = Variants::Single { index: v };

                if def.is_unsafe_cell() {
                    let hide_niches = |scalar: &mut _| match scalar {
                        Scalar::Initialized { value, valid_range } => {
                            *valid_range = WrappingRange::full(value.size(dl))
                        }
                        // Already doesn't have any niches
                        Scalar::Union { .. } => {}
                    };
                    match &mut st.abi {
                        Abi::Uninhabited => {}
                        Abi::Scalar(scalar) => hide_niches(scalar),
                        Abi::ScalarPair(a, b) => {
                            hide_niches(a);
                            hide_niches(b);
                        }
                        Abi::Vector { element, count: _ } => hide_niches(element),
                        Abi::Aggregate { sized: _ } => {}
                    }
                    st.largest_niche = None;
                    return Ok(tcx.intern_layout(st));
                }
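                // E.g., `UnsafeCell<bool>` drops bool's 0..=1 niche above, so
                // `Option<UnsafeCell<bool>>` needs a separate tag byte.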

                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
                match st.abi {
                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                        // the asserts ensure that we are not using the
                        // `#[rustc_layout_scalar_valid_range(n)]`
                        // attribute to widen the range of anything as that would probably
                        // result in UB somewhere
                        // FIXME(eddyb) the asserts are probably not needed,
                        // as larger validity ranges would result in missed
                        // optimizations, *not* wrongly assuming the inner
                        // value is valid. e.g. unions enlarge validity ranges,
                        // because the values may be uninitialized.
                        if let Bound::Included(start) = start {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.start <= start);
                            valid_range.start = start;
                        }
                        if let Bound::Included(end) = end {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.end >= end);
                            valid_range.end = end;
                        }

                        // Update `largest_niche` if we have introduced a larger niche.
                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                        if let Some(niche) = niche {
                            match st.largest_niche {
                                Some(largest_niche) => {
                                    // Replace the existing niche even if they're equal,
                                    // because this one is at a lower offset.
                                    if largest_niche.available(dl) <= niche.available(dl) {
                                        st.largest_niche = Some(niche);
                                    }
                                }
                                None => st.largest_niche = Some(niche),
                            }
                        }
                    }
                    _ => assert!(
                        start == Bound::Unbounded && end == Bound::Unbounded,
                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                        def,
                        st,
                    ),
                }
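                // E.g., `NonNull<T>` uses `#[rustc_layout_scalar_valid_range_start(1)]`,
                // narrowing the data pointer's valid range here and creating the niche
                // that makes `Option<NonNull<T>>` pointer-sized.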

                return Ok(tcx.intern_layout(st));
            }

            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(def.is_enum());

            // Until we've decided whether to use the tagged or
            // niche filling LayoutS, we don't want to intern the
            // variant layouts, so we can't store them in the
            // overall LayoutS. Store the overall LayoutS
            // and the variant LayoutSs here until then.
            struct TmpLayout<'tcx> {
                layout: LayoutS<'tcx>,
                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
            }

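            // E.g., for `Option<&T>` the niche-filling layout makes `Some(&T)` the
            // untagged variant and encodes `None` as the all-zero (null) niche value,
            // so the whole enum stays pointer-sized with no separate tag field.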
            let calculate_niche_filling_layout =
                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                    // The current code for niche-filling relies on variant indices
                    // instead of actual discriminants, so enums with
                    // explicit discriminants (RFC #2363) would misbehave.
                    if def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                    {
                        return Ok(None);
                    }

                    if variants.len() < 2 {
                        return Ok(None);
                    }

                    let mut align = dl.aggregate_align;
                    let mut variant_layouts = variants
                        .iter_enumerated()
                        .map(|(j, v)| {
                            let mut st = univariant_uninterned(
                                cx,
                                ty,
                                v,
                                &def.repr(),
                                StructKind::AlwaysSized,
                            )?;
                            st.variants = Variants::Single { index: j };

                            align = align.max(st.align);

                            Ok(st)
                        })
                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                    let largest_variant_index = match variant_layouts
                        .iter_enumerated()
                        .max_by_key(|(_i, layout)| layout.size.bytes())
                        .map(|(i, _layout)| i)
                    {
                        None => return Ok(None),
                        Some(i) => i,
                    };

                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                    let needs_disc = |index: VariantIdx| {
                        index != largest_variant_index && !absent(&variants[index])
                    };
                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                    let count = niche_variants.size_hint().1.unwrap() as u128;

                    // Find the field with the largest niche
                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
                        [largest_variant_index]
                        .iter()
                        .enumerate()
                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                        .max_by_key(|(_, niche)| niche.available(dl))
                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
                    {
                        None => return Ok(None),
                        Some(x) => x,
                    };

                    let niche_offset = niche.offset
                        + variant_layouts[largest_variant_index].fields.offset(field_index);
                    let niche_size = niche.value.size(dl);
                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

                    let all_variants_fit =
                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                            if i == largest_variant_index {
                                return true;
                            }

                            layout.largest_niche = None;

                            if layout.size <= niche_offset {
                                // This variant will fit before the niche.
                                return true;
                            }

                            // Determine if it'll fit after the niche.
                            let this_align = layout.align.abi;
                            let this_offset = (niche_offset + niche_size).align_to(this_align);

                            if this_offset + layout.size > size {
                                return false;
                            }

                            // It'll fit, but we need to make some adjustments.
                            match layout.fields {
                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                    for (j, offset) in offsets.iter_mut().enumerate() {
                                        if !variants[i][j].is_zst() {
                                            *offset += this_offset;
                                        }
                                    }
                                }
                                _ => {
                                    panic!("Layout of fields should be Arbitrary for variants")
                                }
                            }

                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
                            if !layout.abi.is_uninhabited() {
                                layout.abi = Abi::Aggregate { sized: true };
                            }
                            layout.size += this_offset;

                            true
                        });

                    if !all_variants_fit {
                        return Ok(None);
                    }

                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

                    let others_zst = variant_layouts
                        .iter_enumerated()
                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
                    let same_size = size == variant_layouts[largest_variant_index].size;
                    let same_align = align == variant_layouts[largest_variant_index].align;

                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                        Abi::Uninhabited
                    } else if same_size && same_align && others_zst {
                        match variant_layouts[largest_variant_index].abi {
                            // When the total alignment and size match, we can use the
                            // same ABI as the scalar variant with the reserved niche.
                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                            Abi::ScalarPair(first, second) => {
                                // Only the niche is guaranteed to be initialised,
                                // so use union layouts for the other primitive.
                                if niche_offset == Size::ZERO {
                                    Abi::ScalarPair(niche_scalar, second.to_union())
                                } else {
                                    Abi::ScalarPair(first.to_union(), niche_scalar)
                                }
                            }
                            _ => Abi::Aggregate { sized: true },
                        }
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                    let layout = LayoutS {
                        variants: Variants::Multiple {
                            tag: niche_scalar,
                            tag_encoding: TagEncoding::Niche {
                                untagged_variant: largest_variant_index,
                                niche_variants,
                                niche_start,
                            },
                            tag_field: 0,
                            variants: IndexVec::new(),
                        },
                        fields: FieldsShape::Arbitrary {
                            offsets: vec![niche_offset],
                            memory_index: vec![0],
                        },
                        abi,
                        largest_niche,
                        size,
                        align,
                    };

                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
                };

            let niche_filling_layout = calculate_niche_filling_layout()?;

            let (mut min, mut max) = (i128::MAX, i128::MIN);
            let discr_type = def.repr().discr_type();
            let bits = Integer::from_attr(cx, discr_type).size().bits();
            for (i, discr) in def.discriminants(tcx) {
                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                    continue;
                }
                let mut x = discr.val as i128;
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
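                    // e.g. with an i8 discriminant, a raw byte of 0xff becomes -1_i128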
                    x = (x << (128 - bits)) >> (128 - bits);
                }
                if x < min {
                    min = x;
                }
                if x > max {
                    max = x;
                }
            }
            // We might have no inhabited variants, so pretend there's at least one.
            if (min, max) == (i128::MAX, i128::MIN) {
                min = 0;
                max = 0;
            }
            assert!(min <= max, "discriminant range is {}...{}", min, max);
            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);

            let mut align = dl.aggregate_align;
            let mut size = Size::ZERO;

            // We're interested in the smallest alignment, so start large.
            let mut start_align = Align::from_bytes(256).unwrap();
            assert_eq!(Integer::for_align(dl, start_align), None);

            // repr(C) on an enum tells us to make a (tag, union) layout,
            // so we need to grow the prefix alignment to be at least
            // the alignment of the union. (This value is used both for
            // determining the alignment of the overall enum, and for
            // determining the alignment of the payload after the tag.)
            let mut prefix_align = min_ity.align(dl).abi;
            if def.repr().c() {
                for fields in &variants {
                    for field in fields {
                        prefix_align = prefix_align.max(field.align.abi);
                    }
                }
            }

            // Create the set of structs that represent each variant.
            let mut layout_variants = variants
                .iter_enumerated()
                .map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(
                        cx,
                        ty,
                        &field_layouts,
                        &def.repr(),
                        StructKind::Prefixed(min_ity.size(), prefix_align),
                    )?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            // Align the maximum variant size to the largest alignment.
            size = size.align_to(align.abi);

            if size.bytes() >= dl.obj_size_bound() {
                return Err(LayoutError::SizeOverflow(ty));
            }

            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
            if typeck_ity < min_ity {
                // It is a bug if Layout decided on a greater discriminant size than typeck
                // for some reason at this point (based on the values the discriminant can
                // take on). Mostly because this discriminant will be loaded, and then stored
                // into a variable of the type calculated by typeck. Consider this case (a
                // bug): typeck decided on a byte-sized discriminant, but layout thinks we
                // need 16 bits to store all the discriminant values. Then, in codegen, in
                // order to store this 16-bit discriminant into an 8-bit sized temporary,
                // some of the space necessary to represent it would have to be discarded
                // (or layout is wrong in thinking it needs 16 bits).
                bug!(
                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                    min_ity,
                    typeck_ity
                );
                // However, it is fine to make the discriminant type however large (as an
                // optimisation) after this point; we'll just truncate the value we load in
                // codegen.
            }

            // Check to see if we should use a different type for the
            // discriminant. We can safely use a type with the same size
            // as the alignment of the first field of each variant.
            // We increase the size of the discriminant to avoid LLVM copying
            // padding when it doesn't need to; copying padding normally causes
            // unaligned load/stores and excessive memcpy/memset operations. By
            // using a bigger integer size, LLVM can be sure about its contents
            // and won't be so conservative.
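            // E.g., if every variant's first non-ZST field is 4-byte aligned, a one-byte
            // tag may be widened to a u32 here (when the layout permits) so the tag and
            // its trailing padding can be handled as one aligned unit.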

            // Use the initial field alignment
            let mut ity = if def.repr().c() || def.repr().int.is_some() {
                min_ity
            } else {
                Integer::for_align(dl, start_align).unwrap_or(min_ity)
            };

            // If the alignment is not larger than the chosen discriminant size,
            // don't use the alignment as the final size.
            if ity <= min_ity {
                ity = min_ity;
            } else {
                // Patch up the variants' first few fields.
                let old_ity_size = min_ity.size();
                let new_ity_size = ity.size();
                for variant in &mut layout_variants {
                    match variant.fields {
                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
                            for i in offsets {
                                if *i <= old_ity_size {
                                    assert_eq!(*i, old_ity_size);
                                    *i = new_ity_size;
                                }
                            }
                            // We might be making the struct larger.
                            if variant.size <= old_ity_size {
                                variant.size = new_ity_size;
                            }
                        }
                        _ => bug!(),
                    }
                }
            }

            let tag_mask = ity.size().unsigned_int_max();
            let tag = Scalar::Initialized {
                value: Int(ity, signed),
                valid_range: WrappingRange {
                    start: (min as u128 & tag_mask),
                    end: (max as u128 & tag_mask),
                },
            };
            let mut abi = Abi::Aggregate { sized: true };

            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            } else if tag.size(dl) == size {
                // Make sure we only use scalar layout when the enum is entirely its
                // own tag (i.e. it has no padding nor any non-ZST variant fields).
                abi = Abi::Scalar(tag);
            } else {
                // Try to use a ScalarPair for all tagged enums.
                let mut common_prim = None;
                let mut common_prim_initialized_in_all_variants = true;
                for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
                    let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                        bug!();
                    };
                    let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                    let (field, offset) = match (fields.next(), fields.next()) {
                        (None, None) => {
                            common_prim_initialized_in_all_variants = false;
                            continue;
                        }
                        (Some(pair), None) => pair,
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    let prim = match field.abi {
                        Abi::Scalar(scalar) => {
                            common_prim_initialized_in_all_variants &=
                                matches!(scalar, Scalar::Initialized { .. });
                            scalar.primitive()
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    if let Some(pair) = common_prim {
                        // This is pretty conservative. We could go fancier
                        // by conflating things like i32 and u32, or even
                        // realising that (u8, u8) could just cohabit with
                        // u16 or even u32.
                        if pair != (prim, offset) {
                            common_prim = None;
                            break;
                        }
                    } else {
                        common_prim = Some((prim, offset));
                    }
                }
1236                 if let Some((prim, offset)) = common_prim {
1237                     let prim_scalar = if common_prim_initialized_in_all_variants {
1238                         scalar_unit(prim)
1239                     } else {
1240                         // Common prim might be uninit.
1241                         Scalar::Union { value: prim }
1242                     };
1243                     let pair = scalar_pair(cx, tag, prim_scalar);
1244                     let pair_offsets = match pair.fields {
1245                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1246                             assert_eq!(memory_index, &[0, 1]);
1247                             offsets
1248                         }
1249                         _ => bug!(),
1250                     };
1251                     if pair_offsets[0] == Size::ZERO
1252                         && pair_offsets[1] == *offset
1253                         && align == pair.align
1254                         && size == pair.size
1255                     {
1256                         // We can use `ScalarPair` only when it matches our
1257                         // already computed layout (including `#[repr(C)]`).
1258                         abi = pair.abi;
1259                     }
1260                 }
1261             }
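
            // Illustrative example (not in the original source): for
            //
            //     enum E { A(u32), B(u32) }
            //
            // every variant has exactly one non-ZST field, a `u32` at the
            // same offset, so `common_prim` survives the scan and the enum
            // becomes `Abi::ScalarPair(tag, u32)` rather than an opaque
            // `Abi::Aggregate`, which lets codegen pass it by value.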

            // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
            // variants to ensure they are consistent. This is because a downcast is
            // semantically a NOP, and thus should not affect layout.
            if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
                for variant in &mut layout_variants {
                    // We only do this for variants with fields; the others are not accessed anyway.
                    // Also do not overwrite any already existing "clever" ABIs.
                    if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                        variant.abi = abi;
                        // Also need to bump up the size and alignment, so that the entire value fits in here.
                        variant.size = cmp::max(variant.size, size);
                        variant.align.abi = cmp::max(variant.align.abi, align.abi);
                    }
                }
            }
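
            // Illustrative note (not in the original source): continuing the
            // `enum E { A(u32), B(u32) }` example above, once `E` itself is
            // `Abi::ScalarPair`, each variant's layout is rewritten from
            // `Abi::Aggregate` to that same `ScalarPair` (with size and
            // alignment bumped to the enum's), so accessing the value through
            // a downcast sees the same by-value ABI as the enum itself.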

            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

            let tagged_layout = LayoutS {
                variants: Variants::Multiple {
                    tag,
                    tag_encoding: TagEncoding::Direct,
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
                largest_niche,
                abi,
                align,
                size,
            };

            let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

            let mut best_layout = match (tagged_layout, niche_filling_layout) {
                (tl, Some(nl)) => {
                    // Pick the smaller layout; otherwise,
                    // pick the layout with the larger niche; otherwise,
                    // pick tagged as it has simpler codegen.
                    use Ordering::*;
                    let niche_size = |tmp_l: &TmpLayout<'_>| {
                        tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                    };
                    match (
                        tl.layout.size.cmp(&nl.layout.size),
                        niche_size(&tl).cmp(&niche_size(&nl)),
                    ) {
                        (Greater, _) => nl,
                        (Equal, Less) => nl,
                        _ => tl,
                    }
                }
                (tl, None) => tl,
            };
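
            // Illustrative example (not in the original source): for
            //
            //     enum E { A(bool), B }
            //
            // the tagged layout needs a separate tag byte (2 bytes total),
            // while the niche-filling layout encodes `B` as the invalid
            // `bool` value 2 (1 byte total), so the niche layout wins on
            // size. On equal sizes, the layout exposing the larger niche
            // wins; on a full tie, the tagged layout is kept for its
            // simpler codegen.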

            // Now we can intern the variant layouts and store them in the enum layout.
            best_layout.layout.variants = match best_layout.layout.variants {
                Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
                    tag,
                    tag_encoding,
                    tag_field,
                    variants: best_layout
                        .variants
                        .into_iter()
                        .map(|layout| tcx.intern_layout(layout))
                        .collect(),
                },
                _ => bug!(),
            };

            tcx.intern_layout(best_layout.layout)
        }

        // Types with no meaningful known layout.
        ty::Projection(_) | ty::Opaque(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
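//
// Illustrative example (not in the original source; `g` stands for any
// async function borrowing its argument):
//
//     async fn f() {
//         {
//             let a = [0u8; 32];
//             g(&a).await; // `a` is saved only across this suspension
//         }
//         {
//             let b = [0u8; 32];
//             g(&b).await; // `b` is saved only across this suspension
//         }
//     }
//
// Here `a` and `b` are each assigned to a single variant and are never
// storage-live at the same time, so both are overlap-eligible and can
// share the same offsets in their respective variants, keeping the
// generator at roughly one array's size instead of two.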

/// Compute the eligibility and assignment of each local.
fn generator_saved_local_eligibility<'tcx>(
    info: &GeneratorLayout<'tcx>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }
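
    // Illustrative example (not in the original source): if local `x`
    // conflicts with both `y` and `z`, while `y` and `z` do not conflict
    // with each other, the first `(x, y)` pair removes `x` (2 conflicts)
    // rather than `y` (1 conflict), leaving `y` and `z` free to overlap.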

    // Count the number of variants in use. If only one of them is used,
    // it is impossible to overlap any locals in our layout. In this case
    // it's always better to make the remaining locals ineligible, so we
    // can lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }
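
    // Illustrative example (not in the original source): a generator whose
    // body has a single `.await` has only one populated suspend variant, so
    // overlap can never save space; every saved local is promoted into the
    // shared prefix instead, avoiding per-variant padding.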

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}

/// Compute the full generator layout.
fn generator_layout<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    substs: SubstsRef<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx;
    let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);

    let Some(info) = tcx.generator_layout(def_id) else {
        return Err(LayoutError::Unknown(ty));
    };
    let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = substs.as_generator().prefix_tys().count();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let discr_int_ty = discr_int.to_ty(tcx, false);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
    let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

    // Promoted (ineligible) locals get a slot in every variant, even ones
    // that never initialize them, so their types are wrapped in
    // `MaybeUninit` here.
    let promoted_layouts = ineligible_locals
        .iter()
        .map(|local| subst_field(info.field_tys[local]))
        .map(|ty| tcx.mk_maybe_uninit(ty))
        .map(|ty| cx.layout_of(ty));
    let prefix_layouts = substs
        .as_generator()
        .prefix_tys()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .collect::<Result<Vec<_>, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // GeneratorLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = invert_mapping(&memory_index);

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = (tag_index + 1) as u32;
            let offsets_b = offsets.split_off(b_start as usize);
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
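            // (A worked example of this split appears in the sketch after
            // this function.)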
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: Vec<_> =
                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
            inverse_memory_index.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted.
            let memory_index_a = invert_mapping(&inverse_memory_index_a);
            let memory_index_b = invert_mapping(&inverse_memory_index_b);

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };

    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter()
                .enumerate()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap() as usize;
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index as usize] = i as u32;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(tcx.intern_layout(variant))
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

    size = size.align_to(align.abi);

    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
        Abi::Uninhabited
    } else {
        Abi::Aggregate { sized: true }
    };

    let layout = tcx.intern_layout(LayoutS {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        abi,
        largest_niche: prefix.largest_niche,
        size,
        align,
    });
    debug!("generator layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
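
// A minimal, self-contained sketch (not part of the original source) of the
// permutation bookkeeping used in `generator_layout` above: splitting
// `inverse_memory_index` with `checked_sub`/`retain` keeps each half's
// relative memory order, and dropping `INVALID_FIELD_IDX` slots leaves a
// bijective mapping, so `invert_mapping` can turn either back into a valid
// `memory_index`. The module name and values are hypothetical.
#[cfg(test)]
mod memory_index_examples {
    use super::invert_mapping;

    #[test]
    fn split_halves_invert_cleanly() {
        // Memory order for 5 fields: field 3 first, then fields 0, 4, 1, 2.
        let mut inverse_memory_index: Vec<u32> = vec![3, 0, 4, 1, 2];
        // Fields `0..2` are "outer" (a), fields `2..` are "promoted" (b).
        let b_start = 2u32;

        let inverse_b: Vec<u32> =
            inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
        inverse_memory_index.retain(|&i| i < b_start);
        let inverse_a = inverse_memory_index;

        // Each half is a permutation of its own index range, so both can
        // be inverted back into `memory_index` form.
        assert_eq!(invert_mapping(&inverse_a), vec![0, 1]);
        assert_eq!(invert_mapping(&inverse_b), vec![2, 0, 1]);
    }

    #[test]
    fn sparse_slots_filter_to_bijection() {
        const INVALID_FIELD_IDX: u32 = !0;
        // Three fields placed into four memory slots; slot 2 stays unused.
        let mut combined_inverse = vec![INVALID_FIELD_IDX; 4];
        combined_inverse[0] = 0;
        combined_inverse[1] = 2;
        combined_inverse[3] = 1;

        combined_inverse.retain(|&i| i != INVALID_FIELD_IDX);
        assert_eq!(invert_mapping(&combined_inverse), vec![0, 2, 1]);
    }
}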

/// This is invoked by the `layout_of` query to record the final
/// layout of each type.
#[inline(always)]
fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
    // If we are running with `-Zprint-type-sizes`, maybe record layouts
    // for dumping later.
    if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
        record_layout_for_printing_outlined(cx, layout)
    }
}

fn record_layout_for_printing_outlined<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
) {
    // Ignore layouts computed in non-empty parameter environments, and
    // non-monomorphic layouts, as the user only wants to see the layouts
    // resulting from the final codegen session.
    if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
        return;
    }

    // (delay format until we actually need it)
    let record = |kind, packed, opt_discr_size, variants| {
        let type_desc = format!("{:?}", layout.ty);
        cx.tcx.sess.code_stats.record_type_size(
            kind,
            type_desc,
            layout.align.abi,
            layout.size,
            packed,
            opt_discr_size,
            variants,
        );
    };

    let adt_def = match *layout.ty.kind() {
        ty::Adt(ref adt_def, _) => {
            debug!("print-type-size t: `{:?}` process adt", layout.ty);
            adt_def
        }

        ty::Closure(..) => {
            debug!("print-type-size t: `{:?}` record closure", layout.ty);
            record(DataTypeKind::Closure, false, None, vec![]);
            return;
        }

        _ => {
            debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
            return;
        }
    };

    let adt_kind = adt_def.adt_kind();
    let adt_packed = adt_def.repr().pack.is_some();

    let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
        let mut min_size = Size::ZERO;
        let field_info: Vec<_> = flds
            .iter()
            .enumerate()
            .map(|(i, &name)| {
                let field_layout = layout.field(cx, i);
                let offset = layout.fields.offset(i);
                let field_end = offset + field_layout.size;
                if min_size < field_end {
                    min_size = field_end;
                }
                FieldInfo {
                    name,
                    offset: offset.bytes(),
                    size: field_layout.size.bytes(),
                    align: field_layout.align.abi.bytes(),
                }
            })
            .collect();

        VariantInfo {
            name: n,
            kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
            align: layout.align.abi.bytes(),
            size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
            fields: field_info,
        }
    };

    match layout.variants {
        Variants::Single { index } => {
            if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
                let variant_def = &adt_def.variant(index);
                let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    None,
                    vec![build_variant_info(Some(variant_def.name), &fields, layout)],
                );
            } else {
                // (This case arises for *empty* enums; so give it
                // zero variants.)
                record(adt_kind.into(), adt_packed, None, vec![]);
            }
        }

        Variants::Multiple { tag, ref tag_encoding, .. } => {
            debug!(
                "print-type-size `{:#?}` adt general variants def {}",
                layout.ty,
                adt_def.variants().len()
            );
            let variant_infos: Vec<_> = adt_def
                .variants()
                .iter_enumerated()
                .map(|(i, variant_def)| {
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                    build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
                })
                .collect();
            record(
                adt_kind.into(),
                adt_packed,
                match tag_encoding {
                    TagEncoding::Direct => Some(tag.size(cx)),
                    _ => None,
                },
                variant_infos,
            );
        }
    }
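
    // For reference, a sketch of the eventual `-Zprint-type-sizes` dump fed
    // by the data recorded here (illustrative; exact formatting may differ):
    //
    //     print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes
    //     print-type-size     discriminant: 4 bytes
    //     print-type-size     variant `Some`: 4 bytes
    //     print-type-size         field `.0`: 4 bytes
    //     print-type-size     variant `None`: 0 bytes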
1803 }