]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_ty_utils/src/layout.rs
Add tests for #41731
[rust.git] / compiler / rustc_ty_utils / src / layout.rs
1 use rustc_hir as hir;
2 use rustc_index::bit_set::BitSet;
3 use rustc_index::vec::{Idx, IndexVec};
4 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
5 use rustc_middle::ty::layout::{
6     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
7 };
8 use rustc_middle::ty::{
9     self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
10 };
11 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
12 use rustc_span::symbol::Symbol;
13 use rustc_span::DUMMY_SP;
14 use rustc_target::abi::*;
15
16 use std::fmt::Debug;
17 use std::iter;
18
19 use crate::layout_sanity_check::sanity_check_layout;
20
21 pub fn provide(providers: &mut ty::query::Providers) {
22     *providers = ty::query::Providers { layout_of, ..*providers };
23 }
24
/// Query implementation for `layout_of`: computes the layout of a type in
/// the given param-env, normalizing the type first and caching the result
/// under both the original and the normalized type.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    // Layout is computed with all opaque types etc. revealed.
    let param_env = param_env.with_reveal_all_normalized(tcx);
    // Keep the pre-normalization type so we can detect below whether
    // normalization actually changed anything.
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            // Surface the normalization failure as a layout error rather
            // than ICE-ing; callers decide how to report it.
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    // Record layout statistics (e.g. for `-Zprint-type-sizes`).
    record_layout_for_printing(&cx, layout);

    // Debug-time consistency checks on the computed layout.
    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
63
// Invert a bijective mapping: if `map[x] == y` then `invert(map)[y] == x`.
// Used to convert between `memory_index` (source field order -> memory order)
// and `inverse_memory_index` (memory order -> source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; map.len()];
    for (source, &target) in map.iter().enumerate() {
        inverse[target as usize] = source as u32;
    }
    inverse
}
76
77 fn univariant_uninterned<'tcx>(
78     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
79     ty: Ty<'tcx>,
80     fields: &[TyAndLayout<'_>],
81     repr: &ReprOptions,
82     kind: StructKind,
83 ) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
84     let dl = cx.data_layout();
85     let pack = repr.pack;
86     if pack.is_some() && repr.align.is_some() {
87         cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
88         return Err(LayoutError::Unknown(ty));
89     }
90
91     cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
92 }
93
/// Computes the layout of `ty` from scratch (no query caching at this level).
///
/// `ty` is expected to be fully normalized already (the `layout_of` query
/// normalizes before calling this); the `debug_assert!` below checks that
/// no non-region inference variables remain.
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    // A scalar of the given primitive with its full value range valid.
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    // Interned full-range scalar layout.
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    // Interned single-variant struct-like layout from field layouts.
    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                // Only 0 (false) and 1 (true) are valid bit patterns.
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                // `char` is restricted to valid Unicode scalar-value range.
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            // Function pointers are never null, which gives them a niche.
            let mut ptr = scalar_unit(Pointer);
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(cx.layout_of_never_type()),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            // References (but not raw pointers) are non-null.
            if !ty.is_unsafe_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            // Thin pointer: just the data pointer, no metadata.
            if pointee.is_sized(tcx, param_env) {
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
            let metadata = match unsized_part.kind() {
                ty::Foreign(..) => {
                    // Extern types are unsized but have no metadata,
                    // so pointers to them are thin.
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }
                // Slices/str carry a `usize` length as metadata.
                ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                ty::Dynamic(..) => {
                    // Trait objects carry a non-null vtable pointer.
                    let mut vtable = scalar_unit(Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => return Err(LayoutError::Unknown(unsized_part)),
            };

            // Effectively a (ptr, meta) tuple.
            tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            // `dyn*`: a (data, vtable) scalar pair where the data word may
            // hold any bit pattern, and the vtable pointer is non-null.
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(cx.scalar_pair(data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            // A non-empty array of an uninhabited element is itself
            // uninhabited; `[T; 0]` is always inhabited.
            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            // An empty array has no element to take a niche from.
            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            // Unsized: size/count here describe only the statically-known
            // part (nothing); the length lives in the pointer metadata.
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            // Start from a zero-field unit layout, then mark it unsized.
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            // A closure is laid out like a struct of its captured upvars.
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            // A non-empty tuple may have an unsized last element.
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                // Single array field at offset zero.
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                // Same packed+aligned rejection as `univariant_uninterned`,
                // but pointing at the union's definition span.
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                return Ok(tcx.intern_layout(
                    cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
                ));
            }

            tcx.intern_layout(
                cx.layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
                    // Discriminant values, only meaningful for enums.
                    def.is_enum()
                        .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
                        .into_iter()
                        .flatten(),
                    // Whether layout optimizations are inhibited, either by
                    // `#[repr]` or by any explicit discriminant values.
                    def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
                    // Whether this is a struct whose last field is sized
                    // (i.e. the struct cannot be the tail of an unsized type).
                    {
                        let param_env = tcx.param_env(def.did());
                        def.is_struct()
                            && match def.variants().iter().next().and_then(|x| x.fields.last()) {
                                Some(last_field) => {
                                    tcx.type_of(last_field.did).is_sized(tcx, param_env)
                                }
                                None => false,
                            }
                    },
                )
                .ok_or(LayoutError::SizeOverflow(ty))?,
            )
        }

        // Types with no meaningful known layout.
        ty::Projection(_) | ty::Opaque(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}
461
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant; the initial state.
    Unassigned,
    /// Seen in exactly one variant so far, so it may overlap with fields
    /// of other variants.
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    /// Ineligible for overlap; stored in the generator's prefix. The index
    /// is its position within the prefix once that has been decided
    /// (`None` until then).
    Ineligible(Option<u32>),
}
470
471 // When laying out generators, we divide our saved local fields into two
472 // categories: overlap-eligible and overlap-ineligible.
473 //
474 // Those fields which are ineligible for overlap go in a "prefix" at the
475 // beginning of the layout, and always have space reserved for them.
476 //
477 // Overlap-eligible fields are only assigned to one variant, so we lay
478 // those fields out for each variant and put them right after the
479 // prefix.
480 //
481 // Finally, in the layout details, we point to the fields from the
482 // variants they are assigned to. It is possible for some fields to be
483 // included in multiple variants. No field ever "moves around" in the
484 // layout; its offset is always the same.
485 //
486 // Also included in the layout are the upvars and the discriminant.
487 // These are included as fields on the "outer" layout; they are not part
488 // of any variant.
489
/// Compute the eligibility and assignment of each local.
///
/// Returns the set of overlap-ineligible locals (to be promoted into the
/// prefix) together with the final per-local assignment, where every
/// ineligible local has its prefix position filled in.
fn generator_saved_local_eligibility<'tcx>(
    info: &GeneratorLayout<'tcx>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                // Already ruled out; nothing more to do.
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            // Prefer to remove the local with more conflicts, since that
            // frees up more potential overlaps for the remaining locals.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
587
588 /// Compute the full generator layout.
589 fn generator_layout<'tcx>(
590     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
591     ty: Ty<'tcx>,
592     def_id: hir::def_id::DefId,
593     substs: SubstsRef<'tcx>,
594 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
595     use SavedLocalEligibility::*;
596     let tcx = cx.tcx;
597     let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
598
599     let Some(info) = tcx.generator_layout(def_id) else {
600         return Err(LayoutError::Unknown(ty));
601     };
602     let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
603
604     // Build a prefix layout, including "promoting" all ineligible
605     // locals as part of the prefix. We compute the layout of all of
606     // these fields at once to get optimal packing.
607     let tag_index = substs.as_generator().prefix_tys().count();
608
609     // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
610     let max_discr = (info.variant_fields.len() - 1) as u128;
611     let discr_int = Integer::fit_unsigned(max_discr);
612     let discr_int_ty = discr_int.to_ty(tcx, false);
613     let tag = Scalar::Initialized {
614         value: Primitive::Int(discr_int, false),
615         valid_range: WrappingRange { start: 0, end: max_discr },
616     };
617     let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
618     let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
619
620     let promoted_layouts = ineligible_locals
621         .iter()
622         .map(|local| subst_field(info.field_tys[local]))
623         .map(|ty| tcx.mk_maybe_uninit(ty))
624         .map(|ty| cx.layout_of(ty));
625     let prefix_layouts = substs
626         .as_generator()
627         .prefix_tys()
628         .map(|ty| cx.layout_of(ty))
629         .chain(iter::once(Ok(tag_layout)))
630         .chain(promoted_layouts)
631         .collect::<Result<Vec<_>, _>>()?;
632     let prefix = univariant_uninterned(
633         cx,
634         ty,
635         &prefix_layouts,
636         &ReprOptions::default(),
637         StructKind::AlwaysSized,
638     )?;
639
640     let (prefix_size, prefix_align) = (prefix.size, prefix.align);
641
642     // Split the prefix layout into the "outer" fields (upvars and
643     // discriminant) and the "promoted" fields. Promoted fields will
644     // get included in each variant that requested them in
645     // GeneratorLayout.
646     debug!("prefix = {:#?}", prefix);
647     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
648         FieldsShape::Arbitrary { mut offsets, memory_index } => {
649             let mut inverse_memory_index = invert_mapping(&memory_index);
650
651             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
652             // "outer" and "promoted" fields respectively.
653             let b_start = (tag_index + 1) as u32;
654             let offsets_b = offsets.split_off(b_start as usize);
655             let offsets_a = offsets;
656
657             // Disentangle the "a" and "b" components of `inverse_memory_index`
658             // by preserving the order but keeping only one disjoint "half" each.
659             // FIXME(eddyb) build a better abstraction for permutations, if possible.
660             let inverse_memory_index_b: Vec<_> =
661                 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
662             inverse_memory_index.retain(|&i| i < b_start);
663             let inverse_memory_index_a = inverse_memory_index;
664
665             // Since `inverse_memory_index_{a,b}` each only refer to their
666             // respective fields, they can be safely inverted
667             let memory_index_a = invert_mapping(&inverse_memory_index_a);
668             let memory_index_b = invert_mapping(&inverse_memory_index_b);
669
670             let outer_fields =
671                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
672             (outer_fields, offsets_b, memory_index_b)
673         }
674         _ => bug!(),
675     };
676
677     let mut size = prefix.size;
678     let mut align = prefix.align;
679     let variants = info
680         .variant_fields
681         .iter_enumerated()
682         .map(|(index, variant_fields)| {
683             // Only include overlap-eligible fields when we compute our variant layout.
684             let variant_only_tys = variant_fields
685                 .iter()
686                 .filter(|local| match assignments[**local] {
687                     Unassigned => bug!(),
688                     Assigned(v) if v == index => true,
689                     Assigned(_) => bug!("assignment does not match variant"),
690                     Ineligible(_) => false,
691                 })
692                 .map(|local| subst_field(info.field_tys[*local]));
693
694             let mut variant = univariant_uninterned(
695                 cx,
696                 ty,
697                 &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
698                 &ReprOptions::default(),
699                 StructKind::Prefixed(prefix_size, prefix_align.abi),
700             )?;
701             variant.variants = Variants::Single { index };
702
703             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
704                 bug!();
705             };
706
707             // Now, stitch the promoted and variant-only fields back together in
708             // the order they are mentioned by our GeneratorLayout.
709             // Because we only use some subset (that can differ between variants)
710             // of the promoted fields, we can't just pick those elements of the
711             // `promoted_memory_index` (as we'd end up with gaps).
712             // So instead, we build an "inverse memory_index", as if all of the
713             // promoted fields were being used, but leave the elements not in the
714             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
715             // obtain a valid (bijective) mapping.
716             const INVALID_FIELD_IDX: u32 = !0;
717             let mut combined_inverse_memory_index =
718                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
719             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
720             let combined_offsets = variant_fields
721                 .iter()
722                 .enumerate()
723                 .map(|(i, local)| {
724                     let (offset, memory_index) = match assignments[*local] {
725                         Unassigned => bug!(),
726                         Assigned(_) => {
727                             let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
728                             (offset, promoted_memory_index.len() as u32 + memory_index)
729                         }
730                         Ineligible(field_idx) => {
731                             let field_idx = field_idx.unwrap() as usize;
732                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
733                         }
734                     };
735                     combined_inverse_memory_index[memory_index as usize] = i as u32;
736                     offset
737                 })
738                 .collect();
739
740             // Remove the unused slots and invert the mapping to obtain the
741             // combined `memory_index` (also see previous comment).
742             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
743             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
744
745             variant.fields = FieldsShape::Arbitrary {
746                 offsets: combined_offsets,
747                 memory_index: combined_memory_index,
748             };
749
750             size = size.max(variant.size);
751             align = align.max(variant.align);
752             Ok(variant)
753         })
754         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
755
756     size = size.align_to(align.abi);
757
758     let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
759         Abi::Uninhabited
760     } else {
761         Abi::Aggregate { sized: true }
762     };
763
764     let layout = tcx.intern_layout(LayoutS {
765         variants: Variants::Multiple {
766             tag,
767             tag_encoding: TagEncoding::Direct,
768             tag_field: tag_index,
769             variants,
770         },
771         fields: outer_fields,
772         abi,
773         largest_niche: prefix.largest_niche,
774         size,
775         align,
776     });
777     debug!("generator layout ({:?}): {:#?}", ty, layout);
778     Ok(layout)
779 }
780
781 /// This is invoked by the `layout_of` query to record the final
782 /// layout of each type.
783 #[inline(always)]
784 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
785     // If we are running with `-Zprint-type-sizes`, maybe record layouts
786     // for dumping later.
787     if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
788         record_layout_for_printing_outlined(cx, layout)
789     }
790 }
791
792 fn record_layout_for_printing_outlined<'tcx>(
793     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
794     layout: TyAndLayout<'tcx>,
795 ) {
796     // Ignore layouts that are done with non-empty environments or
797     // non-monomorphic layouts, as the user only wants to see the stuff
798     // resulting from the final codegen session.
799     if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
800         return;
801     }
802
803     // (delay format until we actually need it)
804     let record = |kind, packed, opt_discr_size, variants| {
805         let type_desc = format!("{:?}", layout.ty);
806         cx.tcx.sess.code_stats.record_type_size(
807             kind,
808             type_desc,
809             layout.align.abi,
810             layout.size,
811             packed,
812             opt_discr_size,
813             variants,
814         );
815     };
816
817     let adt_def = match *layout.ty.kind() {
818         ty::Adt(ref adt_def, _) => {
819             debug!("print-type-size t: `{:?}` process adt", layout.ty);
820             adt_def
821         }
822
823         ty::Closure(..) => {
824             debug!("print-type-size t: `{:?}` record closure", layout.ty);
825             record(DataTypeKind::Closure, false, None, vec![]);
826             return;
827         }
828
829         _ => {
830             debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
831             return;
832         }
833     };
834
835     let adt_kind = adt_def.adt_kind();
836     let adt_packed = adt_def.repr().pack.is_some();
837
838     let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
839         let mut min_size = Size::ZERO;
840         let field_info: Vec<_> = flds
841             .iter()
842             .enumerate()
843             .map(|(i, &name)| {
844                 let field_layout = layout.field(cx, i);
845                 let offset = layout.fields.offset(i);
846                 let field_end = offset + field_layout.size;
847                 if min_size < field_end {
848                     min_size = field_end;
849                 }
850                 FieldInfo {
851                     name,
852                     offset: offset.bytes(),
853                     size: field_layout.size.bytes(),
854                     align: field_layout.align.abi.bytes(),
855                 }
856             })
857             .collect();
858
859         VariantInfo {
860             name: n,
861             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
862             align: layout.align.abi.bytes(),
863             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
864             fields: field_info,
865         }
866     };
867
868     match layout.variants {
869         Variants::Single { index } => {
870             if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
871                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
872                 let variant_def = &adt_def.variant(index);
873                 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
874                 record(
875                     adt_kind.into(),
876                     adt_packed,
877                     None,
878                     vec![build_variant_info(Some(variant_def.name), &fields, layout)],
879                 );
880             } else {
881                 // (This case arises for *empty* enums; so give it
882                 // zero variants.)
883                 record(adt_kind.into(), adt_packed, None, vec![]);
884             }
885         }
886
887         Variants::Multiple { tag, ref tag_encoding, .. } => {
888             debug!(
889                 "print-type-size `{:#?}` adt general variants def {}",
890                 layout.ty,
891                 adt_def.variants().len()
892             );
893             let variant_infos: Vec<_> = adt_def
894                 .variants()
895                 .iter_enumerated()
896                 .map(|(i, variant_def)| {
897                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
898                     build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
899                 })
900                 .collect();
901             record(
902                 adt_kind.into(),
903                 adt_packed,
904                 match tag_encoding {
905                     TagEncoding::Direct => Some(tag.size(cx)),
906                     _ => None,
907                 },
908                 variant_infos,
909             );
910         }
911     }
912 }