optimize field ordering by grouping power-of-two arrays with larger types
compiler/rustc_ty_utils/src/layout.rs
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::ty::layout::{
    IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
    self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;

use std::cmp::{self, Ordering};
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

use crate::layout_sanity_check::sanity_check_layout;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    record_layout_for_printing(&cx, layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
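// As a small worked example (hypothetical values, not from this file):
// if `map = [2, 0, 1]`, i.e. source field 0 lives at memory position 2,
// field 1 at position 0, and field 2 at position 1, then
// `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`.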
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

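// A sketch of the arithmetic below, assuming a 64-bit target: for a pair of
// an `i8` scalar (size 1, align 1) and an `i32` scalar (size 4, align 4),
// `b_offset = Size(1).align_to(4) = 4` and `size = (4 + 4).align_to(4) = 8`,
// so the `i8` sits at byte 0, padding fills bytes 1..4, and the `i32`
// occupies bytes 4..8.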
fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
    let dl = cx.data_layout();
    let b_align = b.align(dl);
    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
    let b_offset = a.size(dl).align_to(b_align.abi);
    let size = (b_offset + b.size(dl)).align_to(align.abi);

    // HACK(nox): We iter on `b` and then `a` because `max_by_key`
    // returns the last maximum.
    let largest_niche = Niche::from_scalar(dl, b_offset, b)
        .into_iter()
        .chain(Niche::from_scalar(dl, Size::ZERO, a))
        .max_by_key(|niche| niche.available(dl));

    LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: vec![Size::ZERO, b_offset],
            memory_index: vec![0, 1],
        },
        abi: Abi::ScalarPair(a, b),
        largest_niche,
        align,
        size,
    }
}

fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }

    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index[..end];
        let effective_field_align = |f: &TyAndLayout<'_>| {
            if let Some(pack) = pack {
                f.align.abi.min(pack)
            } else if f.size.bytes().is_power_of_two() && f.size.bytes() >= f.align.abi.bytes() {
                // Try to put fields which have a 2^n size and smaller alignment together with
                // fields that have an alignment matching that size.
                // E.g. group [u8; 4] with u32 fields
                Align::from_bytes(f.size.bytes()).unwrap_or(f.align.abi)
            } else {
                f.align.abi
            }
        };
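        // Worked example of the effective alignment above (hypothetical
        // struct, not from this file): in `struct S { a: u8, b: [u8; 4], c: u32 }`,
        // the `[u8; 4]` field has size 4 (a power of two) and ABI align 1, so
        // its effective alignment is raised to 4 and the sort below groups it
        // with `c: u32` instead of scattering padding around the `u8`.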

        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee
        if repr.can_randomize_type_layout() {
            // `ReprOptions.layout_seed` is a deterministic seed that we can use to
            // randomize field ordering with
            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

            // Shuffle the ordering of the fields
            optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
        } else {
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(effective_field_align(f)))
                    });
                }

                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix
                    optimizing.sort_by_key(|&x| effective_field_align(&fields[x as usize]));
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            //                 regardless of the status of `-Z randomize-layout`
        }
    }

    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_mapping`).
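    // Concretely (hypothetical values): with `inverse_memory_index = [2, 0, 1]`,
    // the loop below visits field 2 first, then field 0, then field 1, and
    // records each field's offset in `offsets[field_index]`, i.e. indexed in
    // source order rather than memory order.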

    let mut sized = true;
    let mut offsets = vec![Size::ZERO; fields.len()];
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;

    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }

    for &i in &inverse_memory_index {
        let field = fields[i as usize];
        if !sized {
            cx.tcx.sess.delay_span_bug(
                DUMMY_SP,
                &format!(
                    "univariant: field #{} of `{}` comes after unsized field",
                    offsets.len(),
                    ty
                ),
            );
        }

        if field.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align.min(AbiAndPrefAlign::new(pack))
        } else {
            field.align
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i as usize] = offset;

        if let Some(mut niche) = field.largest_niche {
            let available = niche.available(dl);
            if available > largest_niche_available {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
    }

    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }

    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;

    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
    // Field 5 would be the first element, so memory_index is i:
    // Note: if we didn't optimize, it's already right.

    let memory_index =
        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

    let size = min_size.align_to(align.abi);
    let mut abi = Abi::Aggregate { sized };

    // Unpack newtype ABIs and find scalar pairs.
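    // E.g. a hypothetical `struct Wrapper(u64)` can simply reuse the
    // `Abi::Scalar` of its only non-ZST field, and a struct whose two non-ZST
    // fields are scalars placed exactly as `scalar_pair` would place them can
    // become an `Abi::ScalarPair`.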
    if sized && size.bytes() > 0 {
        // All other fields must be ZSTs.
        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
                    match field.abi {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi;
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi;
                        }
                        _ => {}
                    }
                }
            }

            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi, b.abi) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = scalar_pair(cx, a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }

    if fields.iter().any(|f| f.abi.is_uninhabited()) {
        abi = Abi::Uninhabited;
    }

    Ok(LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
    })
}

fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            let mut ptr = scalar_unit(Pointer);
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Potentially-wide pointers.
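        // E.g. `&u8` stays a thin (plain scalar) pointer, while `&[u8]` pairs
        // the data pointer with a `usize` length and `&dyn Trait` pairs it
        // with a non-null vtable pointer, matching the `metadata` computed
        // below.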
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            if !ty.is_unsafe_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
            let metadata = match unsized_part.kind() {
                ty::Foreign(..) => {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }
                ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                ty::Dynamic(..) => {
                    let mut vtable = scalar_unit(Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => return Err(LayoutError::Unknown(unsized_part)),
            };

            // Effectively a (ptr, meta) tuple.
            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(scalar_pair(cx, data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                let mut align =
                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                if let Some(repr_align) = def.repr().align {
                    align = align.max(AbiAndPrefAlign::new(repr_align));
                }

                let optimize = !def.repr().inhibit_union_abi_opt();
                let mut size = Size::ZERO;
                let mut abi = Abi::Aggregate { sized: true };
                let index = VariantIdx::new(0);
                for field in &variants[index] {
                    assert!(field.is_sized());
                    align = align.max(field.align);

                    // If all non-ZST fields have the same ABI, forward this ABI
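                    // E.g. for a hypothetical `union U { a: u32, b: NonZeroU32 }`,
                    // both fields are 4-byte integer scalars; `to_union()` drops
                    // the `NonZeroU32` niche and permits uninitialized bits, so
                    // the two field ABIs compare equal and the union keeps a
                    // scalar ABI.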
                    if optimize && !field.is_zst() {
                        // Discard valid range information and allow undef
                        let field_abi = match field.abi {
                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                            Abi::Vector { element: x, count } => {
                                Abi::Vector { element: x.to_union(), count }
                            }
                            Abi::Uninhabited | Abi::Aggregate { .. } => {
                                Abi::Aggregate { sized: true }
                            }
                        };

                        if size == Size::ZERO {
                            // first non ZST: initialize 'abi'
                            abi = field_abi;
                        } else if abi != field_abi {
                            // different fields have different ABI: reset to Aggregate
                            abi = Abi::Aggregate { sized: true };
                        }
                    }

                    size = cmp::max(size, field.size);
                }

                if let Some(pack) = def.repr().pack {
                    align = align.min(AbiAndPrefAlign::new(pack));
                }

                return Ok(tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index },
                    fields: FieldsShape::Union(
                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
                    ),
                    abi,
                    largest_niche: None,
                    align,
                    size: size.align_to(align.abi),
                }));
            }

            // A variant is absent if it's uninhabited and only has ZST fields.
            // Present uninhabited variants only require space for their fields,
            // but *not* an encoding of the discriminant (e.g., a tag value).
            // See issue #49298 for more details on the need to leave space
            // for non-ZST uninhabited data (mostly partial initialization).
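            // E.g. in a hypothetical `enum E { A(Infallible), B(u8) }`, variant
            // `A` is uninhabited and all of its fields are ZSTs, so it is
            // absent and no discriminant value needs to be reserved for it.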
            let absent = |fields: &[TyAndLayout<'_>]| {
                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                let is_zst = fields.iter().all(|f| f.is_zst());
                uninhabited && is_zst
            };
            let (present_first, present_second) = {
                let mut present_variants = variants
                    .iter_enumerated()
                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                (present_variants.next(), present_variants.next())
            };
            let present_first = match present_first {
                Some(present_first) => present_first,
                // Uninhabited because it has no variants, or only absent ones.
                None if def.is_enum() => {
                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                }
                // If it's a struct, still compute a layout so that we can still compute the
                // field offsets.
                None => VariantIdx::new(0),
            };

            let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                        // Representation optimizations are allowed.
                        !def.repr().inhibit_enum_layout_opt());
            if is_struct {
                // Struct, or univariant enum equivalent to a struct.
                // (Typechecking will reject discriminant-sizing attrs.)

                let v = present_first;
                let kind = if def.is_enum() || variants[v].is_empty() {
                    StructKind::AlwaysSized
                } else {
                    let param_env = tcx.param_env(def.did());
                    let last_field = def.variant(v).fields.last().unwrap();
                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
                };

                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
                st.variants = Variants::Single { index: v };

                if def.is_unsafe_cell() {
                    let hide_niches = |scalar: &mut _| match scalar {
                        Scalar::Initialized { value, valid_range } => {
                            *valid_range = WrappingRange::full(value.size(dl))
                        }
                        // Already doesn't have any niches
                        Scalar::Union { .. } => {}
                    };
                    match &mut st.abi {
                        Abi::Uninhabited => {}
                        Abi::Scalar(scalar) => hide_niches(scalar),
                        Abi::ScalarPair(a, b) => {
                            hide_niches(a);
                            hide_niches(b);
                        }
                        Abi::Vector { element, count: _ } => hide_niches(element),
                        Abi::Aggregate { sized: _ } => {}
                    }
                    st.largest_niche = None;
                    return Ok(tcx.intern_layout(st));
                }

                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
                match st.abi {
                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                        // the asserts ensure that we are not using the
                        // `#[rustc_layout_scalar_valid_range(n)]`
                        // attribute to widen the range of anything as that would probably
                        // result in UB somewhere
                        // FIXME(eddyb) the asserts are probably not needed,
                        // as larger validity ranges would result in missed
                        // optimizations, *not* wrongly assuming the inner
                        // value is valid. e.g. unions enlarge validity ranges,
                        // because the values may be uninitialized.
                        if let Bound::Included(start) = start {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.start <= start);
                            valid_range.start = start;
                        }
                        if let Bound::Included(end) = end {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.end >= end);
                            valid_range.end = end;
                        }

                        // Update `largest_niche` if we have introduced a larger niche.
                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                        if let Some(niche) = niche {
                            match st.largest_niche {
                                Some(largest_niche) => {
                                    // Replace the existing niche even if they're equal,
                                    // because this one is at a lower offset.
                                    if largest_niche.available(dl) <= niche.available(dl) {
                                        st.largest_niche = Some(niche);
                                    }
                                }
                                None => st.largest_niche = Some(niche),
                            }
                        }
                    }
                    _ => assert!(
                        start == Bound::Unbounded && end == Bound::Unbounded,
                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                        def,
                        st,
                    ),
                }

                return Ok(tcx.intern_layout(st));
            }

            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(def.is_enum());

            // Until we've decided whether to use the tagged or
            // niche filling LayoutS, we don't want to intern the
            // variant layouts, so we can't store them in the
            // overall LayoutS. Store the overall LayoutS
            // and the variant LayoutSs here until then.
            struct TmpLayout<'tcx> {
                layout: LayoutS<'tcx>,
                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
            }

            let calculate_niche_filling_layout =
                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                    // The current code for niche-filling relies on variant indices
                    // instead of actual discriminants, so enums with
                    // explicit discriminants (RFC #2363) would misbehave.
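                    // The classic instance of this optimization is `Option<&T>`:
                    // the reference's non-null niche encodes `None`, so the enum
                    // is pointer-sized with no separate tag field.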
                    if def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                    {
                        return Ok(None);
                    }

                    if variants.len() < 2 {
                        return Ok(None);
                    }

                    let mut align = dl.aggregate_align;
                    let mut variant_layouts = variants
                        .iter_enumerated()
                        .map(|(j, v)| {
                            let mut st = univariant_uninterned(
                                cx,
                                ty,
                                v,
                                &def.repr(),
                                StructKind::AlwaysSized,
                            )?;
                            st.variants = Variants::Single { index: j };

                            align = align.max(st.align);

                            Ok(st)
                        })
                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                    let largest_variant_index = match variant_layouts
                        .iter_enumerated()
                        .max_by_key(|(_i, layout)| layout.size.bytes())
                        .map(|(i, _layout)| i)
                    {
                        None => return Ok(None),
                        Some(i) => i,
                    };

                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                    let needs_disc = |index: VariantIdx| {
                        index != largest_variant_index && !absent(&variants[index])
                    };
                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                    let count = niche_variants.size_hint().1.unwrap() as u128;

                    // Find the field with the largest niche
                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
                        [largest_variant_index]
                        .iter()
                        .enumerate()
                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                        .max_by_key(|(_, niche)| niche.available(dl))
                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
                    {
                        None => return Ok(None),
                        Some(x) => x,
                    };

                    let niche_offset = niche.offset
                        + variant_layouts[largest_variant_index].fields.offset(field_index);
                    let niche_size = niche.value.size(dl);
                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

                    let all_variants_fit =
                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                            if i == largest_variant_index {
                                return true;
                            }

                            layout.largest_niche = None;

                            if layout.size <= niche_offset {
                                // This variant will fit before the niche.
                                return true;
                            }

                            // Determine if it'll fit after the niche.
                            let this_align = layout.align.abi;
                            let this_offset = (niche_offset + niche_size).align_to(this_align);

                            if this_offset + layout.size > size {
                                return false;
                            }

                            // It'll fit, but we need to make some adjustments.
                            match layout.fields {
                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                    for (j, offset) in offsets.iter_mut().enumerate() {
                                        if !variants[i][j].is_zst() {
                                            *offset += this_offset;
                                        }
                                    }
                                }
                                _ => {
                                    panic!("Layout of fields should be Arbitrary for variants")
                                }
                            }

                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
                            if !layout.abi.is_uninhabited() {
                                layout.abi = Abi::Aggregate { sized: true };
                            }
                            layout.size += this_offset;

                            true
                        });

                    if !all_variants_fit {
                        return Ok(None);
                    }

                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

                    let others_zst = variant_layouts
                        .iter_enumerated()
                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
                    let same_size = size == variant_layouts[largest_variant_index].size;
                    let same_align = align == variant_layouts[largest_variant_index].align;

                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                        Abi::Uninhabited
                    } else if same_size && same_align && others_zst {
                        match variant_layouts[largest_variant_index].abi {
                            // When the total alignment and size match, we can use the
                            // same ABI as the scalar variant with the reserved niche.
                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                            Abi::ScalarPair(first, second) => {
                                // Only the niche is guaranteed to be initialised,
                                // so use union layouts for the other primitive.
                                if niche_offset == Size::ZERO {
                                    Abi::ScalarPair(niche_scalar, second.to_union())
                                } else {
                                    Abi::ScalarPair(first.to_union(), niche_scalar)
                                }
                            }
                            _ => Abi::Aggregate { sized: true },
                        }
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                    let layout = LayoutS {
                        variants: Variants::Multiple {
                            tag: niche_scalar,
                            tag_encoding: TagEncoding::Niche {
                                untagged_variant: largest_variant_index,
                                niche_variants,
                                niche_start,
                            },
                            tag_field: 0,
                            variants: IndexVec::new(),
                        },
                        fields: FieldsShape::Arbitrary {
                            offsets: vec![niche_offset],
                            memory_index: vec![0],
                        },
                        abi,
                        largest_niche,
                        size,
                        align,
                    };

                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
                };

            let niche_filling_layout = calculate_niche_filling_layout()?;

            let (mut min, mut max) = (i128::MAX, i128::MIN);
            let discr_type = def.repr().discr_type();
            let bits = Integer::from_attr(cx, discr_type).size().bits();
            for (i, discr) in def.discriminants(tcx) {
                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                    continue;
                }
                let mut x = discr.val as i128;
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    x = (x << (128 - bits)) >> (128 - bits);
                }
                if x < min {
                    min = x;
                }
                if x > max {
                    max = x;
                }
            }
            // We might have no inhabited variants, so pretend there's at least one.
            if (min, max) == (i128::MAX, i128::MIN) {
                min = 0;
                max = 0;
            }
            assert!(min <= max, "discriminant range is {}...{}", min, max);
            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);

            let mut align = dl.aggregate_align;
            let mut size = Size::ZERO;

            // We're interested in the smallest alignment, so start large.
            let mut start_align = Align::from_bytes(256).unwrap();
            assert_eq!(Integer::for_align(dl, start_align), None);

            // repr(C) on an enum tells us to make a (tag, union) layout,
            // so we need to grow the prefix alignment to be at least
            // the alignment of the union. (This value is used both for
            // determining the alignment of the overall enum, and the
            // determining the alignment of the payload after the tag.)
            let mut prefix_align = min_ity.align(dl).abi;
            if def.repr().c() {
                for fields in &variants {
                    for field in fields {
                        prefix_align = prefix_align.max(field.align.abi);
                    }
                }
            }

            // Create the set of structs that represent each variant.
            let mut layout_variants = variants
                .iter_enumerated()
                .map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(
                        cx,
                        ty,
                        &field_layouts,
                        &def.repr(),
                        StructKind::Prefixed(min_ity.size(), prefix_align),
                    )?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            // Align the maximum variant size to the largest alignment.
            size = size.align_to(align.abi);

            if size.bytes() >= dl.obj_size_bound() {
                return Err(LayoutError::SizeOverflow(ty));
            }

            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
            if typeck_ity < min_ity {
                // It is a bug if Layout decided on a greater discriminant size than typeck for
                // some reason at this point (based on values discriminant can take on). Mostly
                // because this discriminant will be loaded, and then stored into variable of
                // type calculated by typeck. Consider such case (a bug): typeck decided on
                // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                // discriminant values. That would be a bug, because then, in codegen, in order
                // to store this 16-bit discriminant into 8-bit sized temporary some of the
                // space necessary to represent would have to be discarded (or layout is wrong
                // on thinking it needs 16 bits)
                bug!(
                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                    min_ity,
                    typeck_ity
                );
                // However, it is fine to make discr type however large (as an optimisation)
                // after this point – we’ll just truncate the value we load in codegen.
            }

            // Check to see if we should use a different type for the
            // discriminant. We can safely use a type with the same size
            // as the alignment of the first field of each variant.
            // We increase the size of the discriminant to avoid LLVM copying
            // padding when it doesn't need to. This normally causes unaligned
            // load/stores and excessive memcpy/memset operations. By using a
            // bigger integer size, LLVM can be sure about its contents and
            // won't be so conservative.
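            // E.g. for a hypothetical `enum E { A(u32), B }`, the tag would fit
            // in a `u8`, but widening it to a `u32` here lets the value be
            // copied as two aligned 4-byte words instead of a byte, three
            // padding bytes, and a word.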

            // Use the initial field alignment
            let mut ity = if def.repr().c() || def.repr().int.is_some() {
                min_ity
            } else {
                Integer::for_align(dl, start_align).unwrap_or(min_ity)
            };

            // If the alignment is not larger than the chosen discriminant size,
            // don't use the alignment as the final size.
            if ity <= min_ity {
                ity = min_ity;
            } else {
                // Patch up the variants' first few fields.
                let old_ity_size = min_ity.size();
                let new_ity_size = ity.size();
                for variant in &mut layout_variants {
                    match variant.fields {
                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
                            for i in offsets {
                                if *i <= old_ity_size {
                                    assert_eq!(*i, old_ity_size);
                                    *i = new_ity_size;
                                }
                            }
                            // We might be making the struct larger.
                            if variant.size <= old_ity_size {
                                variant.size = new_ity_size;
                            }
                        }
                        _ => bug!(),
                    }
                }
            }

            let tag_mask = ity.size().unsigned_int_max();
            let tag = Scalar::Initialized {
                value: Int(ity, signed),
                valid_range: WrappingRange {
                    start: (min as u128 & tag_mask),
                    end: (max as u128 & tag_mask),
                },
            };
            let mut abi = Abi::Aggregate { sized: true };

            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            } else if tag.size(dl) == size {
                // Make sure we only use scalar layout when the enum is entirely its
                // own tag (i.e. it has no padding nor any non-ZST variant fields).
                abi = Abi::Scalar(tag);
            } else {
                // Try to use a ScalarPair for all tagged enums.
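                // E.g. a hypothetical `enum E { A(u32), B(u32) }` has a single
                // non-ZST scalar field at the same offset in every inhabited
                // variant, so the whole enum can become `ScalarPair(tag, u32)`
                // and be passed around in registers.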
1202                 let mut common_prim = None;
1203                 let mut common_prim_initialized_in_all_variants = true;
1204                 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1205                     let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1206                         bug!();
1207                     };
1208                     let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1209                     let (field, offset) = match (fields.next(), fields.next()) {
1210                         (None, None) => {
1211                             common_prim_initialized_in_all_variants = false;
1212                             continue;
1213                         }
1214                         (Some(pair), None) => pair,
1215                         _ => {
1216                             common_prim = None;
1217                             break;
1218                         }
1219                     };
1220                     let prim = match field.abi {
1221                         Abi::Scalar(scalar) => {
1222                             common_prim_initialized_in_all_variants &=
1223                                 matches!(scalar, Scalar::Initialized { .. });
1224                             scalar.primitive()
1225                         }
1226                         _ => {
1227                             common_prim = None;
1228                             break;
1229                         }
1230                     };
1231                     if let Some(pair) = common_prim {
1232                         // This is pretty conservative. We could go fancier
1233                         // by conflating things like i32 and u32, or even
1234                         // realising that (u8, u8) could just cohabit with
1235                         // u16 or even u32.
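                             // E.g. (sketch) a hypothetical `enum E { A(i32), B(u32) }`
                             // has matching field offsets but `Int(I32, true)` vs.
                             // `Int(I32, false)` primitives, so `common_prim` is
                             // dropped and the enum stays `Abi::Aggregate`.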
1236                         if pair != (prim, offset) {
1237                             common_prim = None;
1238                             break;
1239                         }
1240                     } else {
1241                         common_prim = Some((prim, offset));
1242                     }
1243                 }
1244                 if let Some((prim, offset)) = common_prim {
1245                     let prim_scalar = if common_prim_initialized_in_all_variants {
1246                         scalar_unit(prim)
1247                     } else {
1248                         // Common prim might be uninit.
1249                         Scalar::Union { value: prim }
1250                     };
1251                     let pair = scalar_pair(cx, tag, prim_scalar);
1252                     let pair_offsets = match pair.fields {
1253                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1254                             assert_eq!(memory_index, &[0, 1]);
1255                             offsets
1256                         }
1257                         _ => bug!(),
1258                     };
1259                     if pair_offsets[0] == Size::ZERO
1260                         && pair_offsets[1] == *offset
1261                         && align == pair.align
1262                         && size == pair.size
1263                     {
1264                         // We can use `ScalarPair` only when it matches our
1265                         // already computed layout (including `#[repr(C)]`).
1266                         abi = pair.abi;
1267                     }
1268                 }
1269             }
1270
1271             // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1272             // variants to ensure they are consistent. This is because a downcast is
1273             // semantically a NOP, and thus should not affect layout.
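             // E.g. (sketch) `Result<u32, u32>` ends up `ScalarPair(tag, u32)`;
             // each inhabited variant's own layout started as `Abi::Aggregate`,
             // so it is rewritten below to the same `ScalarPair` (and padded to
             // the full size/align) to keep downcasting a no-op.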
1274             if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1275                 for variant in &mut layout_variants {
1276                     // We only do this for variants with fields; the others are not accessed anyway.
1277                     // Also do not overwrite any already existing "clever" ABIs.
1278                     if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
1279                         variant.abi = abi;
1280                         // Also need to bump up the size and alignment, so that the entire value fits in here.
1281                         variant.size = cmp::max(variant.size, size);
1282                         variant.align.abi = cmp::max(variant.align.abi, align.abi);
1283                     }
1284                 }
1285             }
1286
1287             let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1288
1289             let tagged_layout = LayoutS {
1290                 variants: Variants::Multiple {
1291                     tag,
1292                     tag_encoding: TagEncoding::Direct,
1293                     tag_field: 0,
1294                     variants: IndexVec::new(),
1295                 },
1296                 fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
1297                 largest_niche,
1298                 abi,
1299                 align,
1300                 size,
1301             };
1302
1303             let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1304
1305             let mut best_layout = match (tagged_layout, niche_filling_layout) {
1306                 (tl, Some(nl)) => {
1307                     // Pick the smaller layout; otherwise,
1308                     // pick the layout with the larger niche; otherwise,
1309                     // pick tagged as it has simpler codegen.
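                         // E.g. if both candidate layouts are the same size but
                         // the niche-filling one leaves a larger unused range
                         // for an enclosing type (say, an outer `Option`) to
                         // use as its niche, the `(Equal, Less)` arm picks it.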
1310                     use Ordering::*;
1311                     let niche_size = |tmp_l: &TmpLayout<'_>| {
1312                         tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1313                     };
1314                     match (
1315                         tl.layout.size.cmp(&nl.layout.size),
1316                         niche_size(&tl).cmp(&niche_size(&nl)),
1317                     ) {
1318                         (Greater, _) => nl,
1319                         (Equal, Less) => nl,
1320                         _ => tl,
1321                     }
1322                 }
1323                 (tl, None) => tl,
1324             };
1325
1326             // Now we can intern the variant layouts and store them in the enum layout.
1327             best_layout.layout.variants = match best_layout.layout.variants {
1328                 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1329                     tag,
1330                     tag_encoding,
1331                     tag_field,
1332                     variants: best_layout
1333                         .variants
1334                         .into_iter()
1335                         .map(|layout| tcx.intern_layout(layout))
1336                         .collect(),
1337                 },
1338                 _ => bug!(),
1339             };
1340
1341             tcx.intern_layout(best_layout.layout)
1342         }
1343
1344         // Types with no meaningful known layout.
1345         ty::Projection(_) | ty::Opaque(..) => {
1346             // NOTE(eddyb) `layout_of` query should've normalized these away,
1347             // if that was possible, so there's no reason to try again here.
1348             return Err(LayoutError::Unknown(ty));
1349         }
1350
1351         ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1352             bug!("Layout::compute: unexpected type `{}`", ty)
1353         }
1354
1355         ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1356             return Err(LayoutError::Unknown(ty));
1357         }
1358     })
1359 }
1360
1361 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1362 #[derive(Clone, Debug, PartialEq)]
1363 enum SavedLocalEligibility {
1364     Unassigned,
1365     Assigned(VariantIdx),
1366     // FIXME: Use newtype_index so we aren't wasting bytes
1367     Ineligible(Option<u32>),
1368 }
1369
1370 // When laying out generators, we divide our saved local fields into two
1371 // categories: overlap-eligible and overlap-ineligible.
1372 //
1373 // Those fields which are ineligible for overlap go in a "prefix" at the
1374 // beginning of the layout, and always have space reserved for them.
1375 //
1376 // Overlap-eligible fields are only assigned to one variant, so we lay
1377 // those fields out for each variant and put them right after the
1378 // prefix.
1379 //
1380 // Finally, in the layout details, we point to the fields from the
1381 // variants they are assigned to. It is possible for some fields to be
1382 // included in multiple variants. No field ever "moves around" in the
1383 // layout; its offset is always the same.
1384 //
1385 // Also included in the layout are the upvars and the discriminant.
1386 // These are included as fields on the "outer" layout; they are not part
1387 // of any variant.
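 // An illustrative sketch: for a generator with one upvar `u`, a local `a`
 // live across two yields (ineligible, hence promoted) and locals `b`, `c`
 // each live across a single, distinct yield (eligible):
 //
 //     prefix (always present): u, <tag>, MaybeUninit<A>  // A = type of `a`
 //     Suspend0 variant fields: b  \ laid out after the prefix, so `b` and
 //     Suspend1 variant fields: c  / `c` may share the same offsets.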
1388
1389 /// Compute the eligibility and assignment of each local.
1390 fn generator_saved_local_eligibility<'tcx>(
1391     info: &GeneratorLayout<'tcx>,
1392 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1393     use SavedLocalEligibility::*;
1394
1395     let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1396         IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1397
1398     // The saved locals not eligible for overlap. These will get
1399     // "promoted" to the prefix of our generator.
1400     let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1401
1402     // Figure out which of our saved locals are fields in only
1403     // one variant. The rest are deemed ineligible for overlap.
1404     for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1405         for local in fields {
1406             match assignments[*local] {
1407                 Unassigned => {
1408                     assignments[*local] = Assigned(variant_index);
1409                 }
1410                 Assigned(idx) => {
1411                     // We've already seen this local at another suspension
1412                     // point, so it is no longer a candidate.
1413                     trace!(
1414                         "removing local {:?} in >1 variant ({:?}, {:?})",
1415                         local,
1416                         variant_index,
1417                         idx
1418                     );
1419                     ineligible_locals.insert(*local);
1420                     assignments[*local] = Ineligible(None);
1421                 }
1422                 Ineligible(_) => {}
1423             }
1424         }
1425     }
1426
1427     // Next, check every pair of eligible locals to see if they
1428     // conflict.
1429     for local_a in info.storage_conflicts.rows() {
1430         let conflicts_a = info.storage_conflicts.count(local_a);
1431         if ineligible_locals.contains(local_a) {
1432             continue;
1433         }
1434
1435         for local_b in info.storage_conflicts.iter(local_a) {
1436             // local_a and local_b are storage live at the same time, therefore they
1437             // cannot overlap in the generator layout. The only way to guarantee
1438             // this is if they are in the same variant, or one is ineligible
1439             // (which means it is stored in every variant).
1440             if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
1441                 continue;
1442             }
1443
1444             // If they conflict, we will choose one to make ineligible.
1445             // This is not always optimal; it's just a greedy heuristic that
1446             // seems to produce good results most of the time.
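             // E.g. if `a` conflicts with both `b` and `c` while `b` and `c`
             // do not conflict with each other, evicting `a` (2 conflicts)
             // rather than `b` or `c` (1 conflict each) keeps two locals
             // eligible for overlap.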
1447             let conflicts_b = info.storage_conflicts.count(local_b);
1448             let (remove, other) =
1449                 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1450             ineligible_locals.insert(remove);
1451             assignments[remove] = Ineligible(None);
1452             trace!("removing local {:?} due to conflict with {:?}", remove, other);
1453         }
1454     }
1455
1456     // Count the number of variants in use. If only one is in use, then it
1457     // is impossible to overlap any locals in our layout. In this case it's
1458     // always better to make the remaining locals ineligible, so we can
1459     // lay them out with the other locals in the prefix and eliminate
1460     // unnecessary padding bytes.
1461     {
1462         let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1463         for assignment in &assignments {
1464             if let Assigned(idx) = assignment {
1465                 used_variants.insert(*idx);
1466             }
1467         }
1468         if used_variants.count() < 2 {
1469             for assignment in assignments.iter_mut() {
1470                 *assignment = Ineligible(None);
1471             }
1472             ineligible_locals.insert_all();
1473         }
1474     }
1475
1476     // Write down the order of our locals that will be promoted to the prefix.
1477     {
1478         for (idx, local) in ineligible_locals.iter().enumerate() {
1479             assignments[local] = Ineligible(Some(idx as u32));
1480         }
1481     }
1482     debug!("generator saved local assignments: {:?}", assignments);
1483
1484     (ineligible_locals, assignments)
1485 }
1486
1487 /// Compute the full generator layout.
1488 fn generator_layout<'tcx>(
1489     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
1490     ty: Ty<'tcx>,
1491     def_id: hir::def_id::DefId,
1492     substs: SubstsRef<'tcx>,
1493 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1494     use SavedLocalEligibility::*;
1495     let tcx = cx.tcx;
1496     let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1497
1498     let Some(info) = tcx.generator_layout(def_id) else {
1499         return Err(LayoutError::Unknown(ty));
1500     };
1501     let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
1502
1503     // Build a prefix layout, including "promoting" all ineligible
1504     // locals as part of the prefix. We compute the layout of all of
1505     // these fields at once to get optimal packing.
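     // In source order the prefix is: the upvars (`0..tag_index`), then the
     // tag, then one `MaybeUninit` slot per promoted local; the univariant
     // layout below is still free to permute them in memory.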
1506     let tag_index = substs.as_generator().prefix_tys().count();
1507
1508     // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1509     let max_discr = (info.variant_fields.len() - 1) as u128;
1510     let discr_int = Integer::fit_unsigned(max_discr);
1511     let discr_int_ty = discr_int.to_ty(tcx, false);
1512     let tag = Scalar::Initialized {
1513         value: Primitive::Int(discr_int, false),
1514         valid_range: WrappingRange { start: 0, end: max_discr },
1515     };
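     // E.g. a generator with two suspension points has the three reserved
     // variants (Unresumed/Returned/Panicked) plus two Suspend variants, so
     // `max_discr = 4` and `fit_unsigned` picks `I8`.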
1516     let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
1517     let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1518
1519     let promoted_layouts = ineligible_locals
1520         .iter()
1521         .map(|local| subst_field(info.field_tys[local]))
1522         .map(|ty| tcx.mk_maybe_uninit(ty))
1523         .map(|ty| cx.layout_of(ty));
1524     let prefix_layouts = substs
1525         .as_generator()
1526         .prefix_tys()
1527         .map(|ty| cx.layout_of(ty))
1528         .chain(iter::once(Ok(tag_layout)))
1529         .chain(promoted_layouts)
1530         .collect::<Result<Vec<_>, _>>()?;
1531     let prefix = univariant_uninterned(
1532         cx,
1533         ty,
1534         &prefix_layouts,
1535         &ReprOptions::default(),
1536         StructKind::AlwaysSized,
1537     )?;
1538
1539     let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1540
1541     // Split the prefix layout into the "outer" fields (upvars and
1542     // discriminant) and the "promoted" fields. Promoted fields will
1543     // get included in each variant that requested them in
1544     // GeneratorLayout.
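     // E.g. (sketch) with two upvars (`tag_index == 2`) and two promoted
     // locals, the prefix's source fields are `[up0, up1, tag, p0, p1]`;
     // `b_start == 3`, so `up0`, `up1` and the tag stay as outer fields while
     // the offsets of `p0` and `p1` become `promoted_offsets`.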
1545     debug!("prefix = {:#?}", prefix);
1546     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1547         FieldsShape::Arbitrary { mut offsets, memory_index } => {
1548             let mut inverse_memory_index = invert_mapping(&memory_index);
1549
1550             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1551             // "outer" and "promoted" fields respectively.
1552             let b_start = (tag_index + 1) as u32;
1553             let offsets_b = offsets.split_off(b_start as usize);
1554             let offsets_a = offsets;
1555
1556             // Disentangle the "a" and "b" components of `inverse_memory_index`
1557             // by preserving the order but keeping only one disjoint "half" each.
1558             // FIXME(eddyb) build a better abstraction for permutations, if possible.
1559             let inverse_memory_index_b: Vec<_> =
1560                 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1561             inverse_memory_index.retain(|&i| i < b_start);
1562             let inverse_memory_index_a = inverse_memory_index;
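             // E.g. if `memory_index` was `[1, 3, 0, 2]` (source -> memory),
             // `inverse_memory_index` is `[2, 0, 3, 1]` (memory -> source);
             // with `b_start == 2` the "b" half is `[0, 1]` and the "a" half
             // is `[0, 1]`, each renumbered within its own subset.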
1563
1564             // Since `inverse_memory_index_{a,b}` each only refer to their
1565             // respective fields, they can be safely inverted.
1566             let memory_index_a = invert_mapping(&inverse_memory_index_a);
1567             let memory_index_b = invert_mapping(&inverse_memory_index_b);
1568
1569             let outer_fields =
1570                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1571             (outer_fields, offsets_b, memory_index_b)
1572         }
1573         _ => bug!(),
1574     };
1575
1576     let mut size = prefix.size;
1577     let mut align = prefix.align;
1578     let variants = info
1579         .variant_fields
1580         .iter_enumerated()
1581         .map(|(index, variant_fields)| {
1582             // Only include overlap-eligible fields when we compute our variant layout.
1583             let variant_only_tys = variant_fields
1584                 .iter()
1585                 .filter(|local| match assignments[**local] {
1586                     Unassigned => bug!(),
1587                     Assigned(v) if v == index => true,
1588                     Assigned(_) => bug!("assignment does not match variant"),
1589                     Ineligible(_) => false,
1590                 })
1591                 .map(|local| subst_field(info.field_tys[*local]));
1592
1593             let mut variant = univariant_uninterned(
1594                 cx,
1595                 ty,
1596                 &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1597                 &ReprOptions::default(),
1598                 StructKind::Prefixed(prefix_size, prefix_align.abi),
1599             )?;
1600             variant.variants = Variants::Single { index };
1601
1602             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1603                 bug!();
1604             };
1605
1606             // Now, stitch the promoted and variant-only fields back together in
1607             // the order they are mentioned by our GeneratorLayout.
1608             // Because we only use some subset (that can differ between variants)
1609             // of the promoted fields, we can't just pick those elements of the
1610             // `promoted_memory_index` (as we'd end up with gaps).
1611             // So instead, we build an "inverse memory_index", as if all of the
1612             // promoted fields were being used, but leave the elements not in the
1613             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1614             // obtain a valid (bijective) mapping.
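             // E.g. (sketch) with two promoted slots and a variant mentioning
             // `[p1, e0]` (one promoted, one eligible field), `p1` keeps its
             // promoted memory index while `e0` gets `2 + 0`; the slot for the
             // unused `p0` stays `INVALID_FIELD_IDX` and is filtered out.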
1615             const INVALID_FIELD_IDX: u32 = !0;
1616             let mut combined_inverse_memory_index =
1617                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1618             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1619             let combined_offsets = variant_fields
1620                 .iter()
1621                 .enumerate()
1622                 .map(|(i, local)| {
1623                     let (offset, memory_index) = match assignments[*local] {
1624                         Unassigned => bug!(),
1625                         Assigned(_) => {
1626                             let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
1627                             (offset, promoted_memory_index.len() as u32 + memory_index)
1628                         }
1629                         Ineligible(field_idx) => {
1630                             let field_idx = field_idx.unwrap() as usize;
1631                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1632                         }
1633                     };
1634                     combined_inverse_memory_index[memory_index as usize] = i as u32;
1635                     offset
1636                 })
1637                 .collect();
1638
1639             // Remove the unused slots and invert the mapping to obtain the
1640             // combined `memory_index` (also see previous comment).
1641             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1642             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1643
1644             variant.fields = FieldsShape::Arbitrary {
1645                 offsets: combined_offsets,
1646                 memory_index: combined_memory_index,
1647             };
1648
1649             size = size.max(variant.size);
1650             align = align.max(variant.align);
1651             Ok(tcx.intern_layout(variant))
1652         })
1653         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1654
1655     size = size.align_to(align.abi);
1656
1657     let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1658         Abi::Uninhabited
1659     } else {
1660         Abi::Aggregate { sized: true }
1661     };
1662
1663     let layout = tcx.intern_layout(LayoutS {
1664         variants: Variants::Multiple {
1665             tag,
1666             tag_encoding: TagEncoding::Direct,
1667             tag_field: tag_index,
1668             variants,
1669         },
1670         fields: outer_fields,
1671         abi,
1672         largest_niche: prefix.largest_niche,
1673         size,
1674         align,
1675     });
1676     debug!("generator layout ({:?}): {:#?}", ty, layout);
1677     Ok(layout)
1678 }
1679
1680 /// This is invoked by the `layout_of` query to record the final
1681 /// layout of each type.
1682 #[inline(always)]
1683 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
1684     // If we are running with `-Zprint-type-sizes`, maybe record layouts
1685     // for dumping later.
1686     if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
1687         record_layout_for_printing_outlined(cx, layout)
1688     }
1689 }
1690
1691 fn record_layout_for_printing_outlined<'tcx>(
1692     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
1693     layout: TyAndLayout<'tcx>,
1694 ) {
1695     // Ignore layouts computed with non-empty param environments, and
1696     // non-monomorphic layouts, as the user only wants to see the stuff
1697     // resulting from the final codegen session.
1698     if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
1699         return;
1700     }
1701
1702     // (delay format until we actually need it)
1703     let record = |kind, packed, opt_discr_size, variants| {
1704         let type_desc = format!("{:?}", layout.ty);
1705         cx.tcx.sess.code_stats.record_type_size(
1706             kind,
1707             type_desc,
1708             layout.align.abi,
1709             layout.size,
1710             packed,
1711             opt_discr_size,
1712             variants,
1713         );
1714     };
1715
1716     let adt_def = match *layout.ty.kind() {
1717         ty::Adt(ref adt_def, _) => {
1718             debug!("print-type-size t: `{:?}` process adt", layout.ty);
1719             adt_def
1720         }
1721
1722         ty::Closure(..) => {
1723             debug!("print-type-size t: `{:?}` record closure", layout.ty);
1724             record(DataTypeKind::Closure, false, None, vec![]);
1725             return;
1726         }
1727
1728         _ => {
1729             debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1730             return;
1731         }
1732     };
1733
1734     let adt_kind = adt_def.adt_kind();
1735     let adt_packed = adt_def.repr().pack.is_some();
1736
1737     let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1738         let mut min_size = Size::ZERO;
1739         let field_info: Vec<_> = flds
1740             .iter()
1741             .enumerate()
1742             .map(|(i, &name)| {
1743                 let field_layout = layout.field(cx, i);
1744                 let offset = layout.fields.offset(i);
1745                 let field_end = offset + field_layout.size;
1746                 if min_size < field_end {
1747                     min_size = field_end;
1748                 }
1749                 FieldInfo {
1750                     name,
1751                     offset: offset.bytes(),
1752                     size: field_layout.size.bytes(),
1753                     align: field_layout.align.abi.bytes(),
1754                 }
1755             })
1756             .collect();
1757
1758         VariantInfo {
1759             name: n,
1760             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1761             align: layout.align.abi.bytes(),
1762             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1763             fields: field_info,
1764         }
1765     };
1766
1767     match layout.variants {
1768         Variants::Single { index } => {
1769             if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1770                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
1771                 let variant_def = &adt_def.variant(index);
1772                 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1773                 record(
1774                     adt_kind.into(),
1775                     adt_packed,
1776                     None,
1777                     vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1778                 );
1779             } else {
1780                 // (This case arises for *empty* enums, so give it
1781                 // zero variants.)
1782                 record(adt_kind.into(), adt_packed, None, vec![]);
1783             }
1784         }
1785
1786         Variants::Multiple { tag, ref tag_encoding, .. } => {
1787             debug!(
1788                 "print-type-size `{:#?}` adt general variants def {}",
1789                 layout.ty,
1790                 adt_def.variants().len()
1791             );
1792             let variant_infos: Vec<_> = adt_def
1793                 .variants()
1794                 .iter_enumerated()
1795                 .map(|(i, variant_def)| {
1796                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1797                     build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
1798                 })
1799                 .collect();
1800             record(
1801                 adt_kind.into(),
1802                 adt_packed,
1803                 match tag_encoding {
1804                     TagEncoding::Direct => Some(tag.size(cx)),
1805                     _ => None,
1806                 },
1807                 variant_infos,
1808             );
1809         }
1810     }
1811 }