]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_ty_utils/src/layout.rs
Rollup merge of #107706 - tgross35:atomic-as-mut-ptr, r=m-ou-se
[rust.git] / compiler / rustc_ty_utils / src / layout.rs
1 use hir::def_id::DefId;
2 use rustc_hir as hir;
3 use rustc_index::bit_set::BitSet;
4 use rustc_index::vec::{Idx, IndexVec};
5 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
6 use rustc_middle::ty::layout::{
7     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
8 };
9 use rustc_middle::ty::{
10     self, subst::SubstsRef, AdtDef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
11 };
12 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
13 use rustc_span::symbol::Symbol;
14 use rustc_span::DUMMY_SP;
15 use rustc_target::abi::*;
16
17 use std::fmt::Debug;
18 use std::iter;
19
20 use crate::layout_sanity_check::sanity_check_layout;
21
/// Registers this module's queries with the global query `Providers` table.
/// Only the `layout_of` query is provided here; all other providers are
/// preserved via the struct-update (`..*providers`) syntax.
pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}
25
/// Query implementation: computes the layout of a type in a given `ParamEnv`.
///
/// The type is first normalized (with `reveal_all`); if normalization fails,
/// `LayoutError::NormalizationFailure` is returned. If normalization changed
/// the type, the query re-enters itself with the normalized type so the
/// result is cached under that key as well. The computed layout is recorded
/// for `-Z print-type-sizes` style reporting and sanity-checked before being
/// returned.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    // Layout is computed post-monomorphization, so all opaque types etc. are
    // revealed here.
    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    // Feeds `-Z print-type-sizes`-style diagnostics.
    record_layout_for_printing(&cx, layout);

    // Debug-assert internal consistency of the freshly computed layout.
    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
64
// Invert a bijective mapping: if `map[x] == y` then `invert_mapping(map)[y] == x`.
// Used to convert between `memory_index` (source field order -> memory order)
// and `inverse_memory_index` (memory order -> source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; map.len()];
    for (src, &dst) in map.iter().enumerate() {
        inverse[dst as usize] = src as u32;
    }
    inverse
}
77
/// Lays out a single-variant aggregate (struct/tuple/closure prefix) from the
/// already-computed layouts of its fields, returning the *uninterned* layout.
///
/// Returns `LayoutError::Unknown` if the repr is contradictory
/// (`#[repr(packed)]` combined with `#[repr(align)]`, which should have been
/// rejected earlier — hence the delayed bug rather than a hard error), and
/// `LayoutError::SizeOverflow` if the computed size exceeds the target's
/// object size limit.
fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        // E0587 should have been emitted during attribute checking already.
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }

    cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
}
94
/// Computes the layout of `ty` without consulting the `layout_of` query cache.
///
/// This is the core dispatch over every `ty::TyKind`: scalars, pointers
/// (thin and wide), arrays/slices/str, unit-like types, generators, closures,
/// tuples, SIMD vectors, and general ADTs. Types whose layout cannot be known
/// here (unnormalized aliases, params, errors) produce `LayoutError::Unknown`;
/// kinds that should never reach layout computation trigger `bug!`.
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    // Helper: a scalar of the given primitive with the full value range valid.
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    // Helper: an interned scalar layout for a full-range primitive.
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    // Helper: an interned single-variant aggregate layout.
    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    // Layout is only meaningful for types without (non-region) inference vars.
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                // Only 0 (false) and 1 (true) are valid bool bit patterns.
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                // `char` is restricted to valid Unicode scalar value range.
                // NOTE(review): the surrogate gap (0xD800..=0xDFFF) is not
                // expressible as a single `WrappingRange`; only the upper
                // bound is encoded here.
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            let mut ptr = scalar_unit(Pointer(dl.instruction_address_space));
            // Function pointers are non-null, giving `Option<fn()>` a niche.
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(cx.layout_of_never_type()),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
            if !ty.is_unsafe_ptr() {
                // References are non-null; raw pointers may be null.
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                // Sized pointee: a thin pointer, just the data scalar.
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);

            // Determine the metadata scalar: via the `Pointee` projection if
            // the lang item is available, otherwise by matching on the
            // unsized tail directly.
            let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type() {
                let metadata_ty = tcx.normalize_erasing_regions(
                    param_env,
                    tcx.mk_projection(metadata_def_id, [pointee]),
                );
                let metadata_layout = cx.layout_of(metadata_ty)?;
                // If the metadata is a 1-zst, then the pointer is thin.
                if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }

                let Abi::Scalar(metadata) = metadata_layout.abi else {
                    return Err(LayoutError::Unknown(unsized_part));
                };
                metadata
            } else {
                match unsized_part.kind() {
                    ty::Foreign(..) => {
                        // Extern types have no metadata: thin pointer.
                        return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                    }
                    // Slices and `str` carry a usize length.
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        // Trait objects carry a non-null vtable pointer.
                        let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => {
                        return Err(LayoutError::Unknown(unsized_part));
                    }
                }
            };

            // Effectively a (ptr, meta) tuple.
            tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
        }

        // `dyn* Trait`: a (data, vtable) pair where the data is usize-sized.
        ty::Dynamic(_, _, ty::DynStar) => {
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(cx.scalar_pair(data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            // A non-empty array of an uninhabited element is itself
            // uninhabited; a zero-length array is always inhabited.
            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            // A zero-length array contains no element, hence no niche.
            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            // Unsized: element stride known, count unknown (0 here), size 0.
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            // Start from an empty struct layout, then mark it unsized.
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            // A closure is laid out like a struct of its captured upvars.
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            // A non-empty tuple's last field may be unsized.
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                // One field: the array itself, at offset zero.
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                // `e_len` scalar fields laid out contiguously.
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    // Mirrors the struct check in `univariant_uninterned`;
                    // should have been rejected during attribute checking.
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                return Ok(tcx.intern_layout(
                    cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
                ));
            }

            tcx.intern_layout(
                cx.layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
                    // Discriminant values, only for enums.
                    def.is_enum()
                        .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
                        .into_iter()
                        .flatten(),
                    // Whether layout optimizations are inhibited: explicitly
                    // via repr, or implicitly by non-default discriminants.
                    def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
                    // Whether this is a struct whose last field is known sized
                    // (relevant for MaybeUnsized handling).
                    {
                        let param_env = tcx.param_env(def.did());
                        def.is_struct()
                            && match def.variants().iter().next().and_then(|x| x.fields.last()) {
                                Some(last_field) => {
                                    tcx.type_of(last_field.did).is_sized(tcx, param_env)
                                }
                                None => false,
                            }
                    },
                )
                .ok_or(LayoutError::SizeOverflow(ty))?,
            )
        }

        // Types with no meaningful known layout.
        ty::Alias(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..)
        | ty::GeneratorWitness(..)
        | ty::GeneratorWitnessMIR(..)
        | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}
485
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Initial state: not yet seen in any variant.
    Unassigned,
    /// Eligible for overlap: used in exactly one variant (so far).
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    /// Ineligible for overlap: promoted to the generator's prefix.
    /// `Some(idx)` is its position in the prefix once assigned; `None` until
    /// the prefix order has been decided.
    Ineligible(Option<u32>),
}
494
495 // When laying out generators, we divide our saved local fields into two
496 // categories: overlap-eligible and overlap-ineligible.
497 //
498 // Those fields which are ineligible for overlap go in a "prefix" at the
499 // beginning of the layout, and always have space reserved for them.
500 //
501 // Overlap-eligible fields are only assigned to one variant, so we lay
502 // those fields out for each variant and put them right after the
503 // prefix.
504 //
505 // Finally, in the layout details, we point to the fields from the
506 // variants they are assigned to. It is possible for some fields to be
507 // included in multiple variants. No field ever "moves around" in the
508 // layout; its offset is always the same.
509 //
510 // Also included in the layout are the upvars and the discriminant.
511 // These are included as fields on the "outer" layout; they are not part
512 // of any variant.
513
/// Compute the eligibility and assignment of each local.
///
/// Returns the set of overlap-ineligible locals (to be promoted to the
/// generator's prefix) together with the final `SavedLocalEligibility`
/// assignment for every saved local. Locals used in more than one variant,
/// or whose storage conflicts with another eligible local, are made
/// ineligible via a greedy heuristic.
fn generator_saved_local_eligibility(
    info: &GeneratorLayout<'_>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            // The local with more conflicts is removed, freeing up the other
            // for overlap with its remaining peers.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
611
612 /// Compute the full generator layout.
613 fn generator_layout<'tcx>(
614     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
615     ty: Ty<'tcx>,
616     def_id: hir::def_id::DefId,
617     substs: SubstsRef<'tcx>,
618 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
619     use SavedLocalEligibility::*;
620     let tcx = cx.tcx;
621     let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
622
623     let Some(info) = tcx.generator_layout(def_id) else {
624         return Err(LayoutError::Unknown(ty));
625     };
626     let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
627
628     // Build a prefix layout, including "promoting" all ineligible
629     // locals as part of the prefix. We compute the layout of all of
630     // these fields at once to get optimal packing.
631     let tag_index = substs.as_generator().prefix_tys().count();
632
633     // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
634     let max_discr = (info.variant_fields.len() - 1) as u128;
635     let discr_int = Integer::fit_unsigned(max_discr);
636     let discr_int_ty = discr_int.to_ty(tcx, false);
637     let tag = Scalar::Initialized {
638         value: Primitive::Int(discr_int, false),
639         valid_range: WrappingRange { start: 0, end: max_discr },
640     };
641     let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
642     let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
643
644     let promoted_layouts = ineligible_locals
645         .iter()
646         .map(|local| subst_field(info.field_tys[local].ty))
647         .map(|ty| tcx.mk_maybe_uninit(ty))
648         .map(|ty| cx.layout_of(ty));
649     let prefix_layouts = substs
650         .as_generator()
651         .prefix_tys()
652         .map(|ty| cx.layout_of(ty))
653         .chain(iter::once(Ok(tag_layout)))
654         .chain(promoted_layouts)
655         .collect::<Result<Vec<_>, _>>()?;
656     let prefix = univariant_uninterned(
657         cx,
658         ty,
659         &prefix_layouts,
660         &ReprOptions::default(),
661         StructKind::AlwaysSized,
662     )?;
663
664     let (prefix_size, prefix_align) = (prefix.size, prefix.align);
665
666     // Split the prefix layout into the "outer" fields (upvars and
667     // discriminant) and the "promoted" fields. Promoted fields will
668     // get included in each variant that requested them in
669     // GeneratorLayout.
670     debug!("prefix = {:#?}", prefix);
671     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
672         FieldsShape::Arbitrary { mut offsets, memory_index } => {
673             let mut inverse_memory_index = invert_mapping(&memory_index);
674
675             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
676             // "outer" and "promoted" fields respectively.
677             let b_start = (tag_index + 1) as u32;
678             let offsets_b = offsets.split_off(b_start as usize);
679             let offsets_a = offsets;
680
681             // Disentangle the "a" and "b" components of `inverse_memory_index`
682             // by preserving the order but keeping only one disjoint "half" each.
683             // FIXME(eddyb) build a better abstraction for permutations, if possible.
684             let inverse_memory_index_b: Vec<_> =
685                 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
686             inverse_memory_index.retain(|&i| i < b_start);
687             let inverse_memory_index_a = inverse_memory_index;
688
689             // Since `inverse_memory_index_{a,b}` each only refer to their
690             // respective fields, they can be safely inverted
691             let memory_index_a = invert_mapping(&inverse_memory_index_a);
692             let memory_index_b = invert_mapping(&inverse_memory_index_b);
693
694             let outer_fields =
695                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
696             (outer_fields, offsets_b, memory_index_b)
697         }
698         _ => bug!(),
699     };
700
701     let mut size = prefix.size;
702     let mut align = prefix.align;
703     let variants = info
704         .variant_fields
705         .iter_enumerated()
706         .map(|(index, variant_fields)| {
707             // Only include overlap-eligible fields when we compute our variant layout.
708             let variant_only_tys = variant_fields
709                 .iter()
710                 .filter(|local| match assignments[**local] {
711                     Unassigned => bug!(),
712                     Assigned(v) if v == index => true,
713                     Assigned(_) => bug!("assignment does not match variant"),
714                     Ineligible(_) => false,
715                 })
716                 .map(|local| subst_field(info.field_tys[*local].ty));
717
718             let mut variant = univariant_uninterned(
719                 cx,
720                 ty,
721                 &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
722                 &ReprOptions::default(),
723                 StructKind::Prefixed(prefix_size, prefix_align.abi),
724             )?;
725             variant.variants = Variants::Single { index };
726
727             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
728                 bug!();
729             };
730
731             // Now, stitch the promoted and variant-only fields back together in
732             // the order they are mentioned by our GeneratorLayout.
733             // Because we only use some subset (that can differ between variants)
734             // of the promoted fields, we can't just pick those elements of the
735             // `promoted_memory_index` (as we'd end up with gaps).
736             // So instead, we build an "inverse memory_index", as if all of the
737             // promoted fields were being used, but leave the elements not in the
738             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
739             // obtain a valid (bijective) mapping.
740             const INVALID_FIELD_IDX: u32 = !0;
741             let mut combined_inverse_memory_index =
742                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
743             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
744             let combined_offsets = variant_fields
745                 .iter()
746                 .enumerate()
747                 .map(|(i, local)| {
748                     let (offset, memory_index) = match assignments[*local] {
749                         Unassigned => bug!(),
750                         Assigned(_) => {
751                             let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
752                             (offset, promoted_memory_index.len() as u32 + memory_index)
753                         }
754                         Ineligible(field_idx) => {
755                             let field_idx = field_idx.unwrap() as usize;
756                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
757                         }
758                     };
759                     combined_inverse_memory_index[memory_index as usize] = i as u32;
760                     offset
761                 })
762                 .collect();
763
764             // Remove the unused slots and invert the mapping to obtain the
765             // combined `memory_index` (also see previous comment).
766             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
767             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
768
769             variant.fields = FieldsShape::Arbitrary {
770                 offsets: combined_offsets,
771                 memory_index: combined_memory_index,
772             };
773
774             size = size.max(variant.size);
775             align = align.max(variant.align);
776             Ok(variant)
777         })
778         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
779
780     size = size.align_to(align.abi);
781
782     let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
783         Abi::Uninhabited
784     } else {
785         Abi::Aggregate { sized: true }
786     };
787
788     let layout = tcx.intern_layout(LayoutS {
789         variants: Variants::Multiple {
790             tag,
791             tag_encoding: TagEncoding::Direct,
792             tag_field: tag_index,
793             variants,
794         },
795         fields: outer_fields,
796         abi,
797         largest_niche: prefix.largest_niche,
798         size,
799         align,
800     });
801     debug!("generator layout ({:?}): {:#?}", ty, layout);
802     Ok(layout)
803 }
804
805 /// This is invoked by the `layout_of` query to record the final
806 /// layout of each type.
807 #[inline(always)]
808 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
809     // If we are running with `-Zprint-type-sizes`, maybe record layouts
810     // for dumping later.
811     if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
812         record_layout_for_printing_outlined(cx, layout)
813     }
814 }
815
816 fn record_layout_for_printing_outlined<'tcx>(
817     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
818     layout: TyAndLayout<'tcx>,
819 ) {
820     // Ignore layouts that are done with non-empty environments or
821     // non-monomorphic layouts, as the user only wants to see the stuff
822     // resulting from the final codegen session.
823     if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
824         return;
825     }
826
827     // (delay format until we actually need it)
828     let record = |kind, packed, opt_discr_size, variants| {
829         let type_desc = format!("{:?}", layout.ty);
830         cx.tcx.sess.code_stats.record_type_size(
831             kind,
832             type_desc,
833             layout.align.abi,
834             layout.size,
835             packed,
836             opt_discr_size,
837             variants,
838         );
839     };
840
841     match *layout.ty.kind() {
842         ty::Adt(adt_def, _) => {
843             debug!("print-type-size t: `{:?}` process adt", layout.ty);
844             let adt_kind = adt_def.adt_kind();
845             let adt_packed = adt_def.repr().pack.is_some();
846             let (variant_infos, opt_discr_size) = variant_info_for_adt(cx, layout, adt_def);
847             record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos);
848         }
849
850         ty::Generator(def_id, substs, _) => {
851             debug!("print-type-size t: `{:?}` record generator", layout.ty);
852             // Generators always have a begin/poisoned/end state with additional suspend points
853             let (variant_infos, opt_discr_size) =
854                 variant_info_for_generator(cx, layout, def_id, substs);
855             record(DataTypeKind::Generator, false, opt_discr_size, variant_infos);
856         }
857
858         ty::Closure(..) => {
859             debug!("print-type-size t: `{:?}` record closure", layout.ty);
860             record(DataTypeKind::Closure, false, None, vec![]);
861         }
862
863         _ => {
864             debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
865         }
866     };
867 }
868
869 fn variant_info_for_adt<'tcx>(
870     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
871     layout: TyAndLayout<'tcx>,
872     adt_def: AdtDef<'tcx>,
873 ) -> (Vec<VariantInfo>, Option<Size>) {
874     let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
875         let mut min_size = Size::ZERO;
876         let field_info: Vec<_> = flds
877             .iter()
878             .enumerate()
879             .map(|(i, &name)| {
880                 let field_layout = layout.field(cx, i);
881                 let offset = layout.fields.offset(i);
882                 min_size = min_size.max(offset + field_layout.size);
883                 FieldInfo {
884                     kind: FieldKind::AdtField,
885                     name,
886                     offset: offset.bytes(),
887                     size: field_layout.size.bytes(),
888                     align: field_layout.align.abi.bytes(),
889                 }
890             })
891             .collect();
892
893         VariantInfo {
894             name: n,
895             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
896             align: layout.align.abi.bytes(),
897             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
898             fields: field_info,
899         }
900     };
901
902     match layout.variants {
903         Variants::Single { index } => {
904             if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
905                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
906                 let variant_def = &adt_def.variant(index);
907                 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
908                 (vec![build_variant_info(Some(variant_def.name), &fields, layout)], None)
909             } else {
910                 (vec![], None)
911             }
912         }
913
914         Variants::Multiple { tag, ref tag_encoding, .. } => {
915             debug!(
916                 "print-type-size `{:#?}` adt general variants def {}",
917                 layout.ty,
918                 adt_def.variants().len()
919             );
920             let variant_infos: Vec<_> = adt_def
921                 .variants()
922                 .iter_enumerated()
923                 .map(|(i, variant_def)| {
924                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
925                     build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
926                 })
927                 .collect();
928
929             (
930                 variant_infos,
931                 match tag_encoding {
932                     TagEncoding::Direct => Some(tag.size(cx)),
933                     _ => None,
934                 },
935             )
936         }
937     }
938 }
939
/// Computes the `-Zprint-type-sizes` variant statistics for a generator type,
/// plus the size of its discriminant when one is directly encoded.
///
/// Returns an empty variant list when the layout is not `Variants::Multiple`.
fn variant_info_for_generator<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
    def_id: DefId,
    substs: ty::SubstsRef<'tcx>,
) -> (Vec<VariantInfo>, Option<Size>) {
    let Variants::Multiple { tag, ref tag_encoding, tag_field, .. } = layout.variants else {
        return (vec![], None);
    };

    let (generator, state_specific_names) = cx.tcx.generator_layout_and_saved_local_names(def_id);
    let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);

    // Upvars live in the prefix shared by every variant, so their `FieldInfo`s
    // are computed once here and appended to each variant's field list below.
    let mut upvars_size = Size::ZERO;
    let upvar_fields: Vec<_> = substs
        .as_generator()
        .upvar_tys()
        .zip(upvar_names)
        .enumerate()
        .map(|(field_idx, (_, name))| {
            let field_layout = layout.field(cx, field_idx);
            let offset = layout.fields.offset(field_idx);
            upvars_size = upvars_size.max(offset + field_layout.size);
            FieldInfo {
                kind: FieldKind::Upvar,
                name: Symbol::intern(&name),
                offset: offset.bytes(),
                size: field_layout.size.bytes(),
                align: field_layout.align.abi.bytes(),
            }
        })
        .collect();

    let mut variant_infos: Vec<_> = generator
        .variant_fields
        .iter_enumerated()
        .map(|(variant_idx, variant_def)| {
            let variant_layout = layout.for_variant(cx, variant_idx);
            let mut variant_size = Size::ZERO;
            let fields = variant_def
                .iter()
                .enumerate()
                .map(|(field_idx, local)| {
                    let field_layout = variant_layout.field(cx, field_idx);
                    let offset = variant_layout.fields.offset(field_idx);
                    // The struct is as large as the last field's end
                    variant_size = variant_size.max(offset + field_layout.size);
                    FieldInfo {
                        kind: FieldKind::GeneratorLocal,
                        // Prefer the source-level name of the saved local when
                        // one was recorded; otherwise synthesize a name from
                        // the local's index.
                        name: state_specific_names.get(*local).copied().flatten().unwrap_or(
                            Symbol::intern(&format!(".generator_field{}", local.as_usize())),
                        ),
                        offset: offset.bytes(),
                        size: field_layout.size.bytes(),
                        align: field_layout.align.abi.bytes(),
                    }
                })
                // Report the shared upvar fields as part of every variant.
                .chain(upvar_fields.iter().copied())
                .collect();

            // If the variant has no state-specific fields, then it's the size of the upvars.
            if variant_size == Size::ZERO {
                variant_size = upvars_size;
            }

            // This `if` deserves some explanation.
            //
            // The layout code has a choice of where to place the discriminant of this generator.
            // If the discriminant of the generator is placed early in the layout (before the
            // variant's own fields), then it'll implicitly be counted towards the size of the
            // variant, since we use the maximum offset to calculate size.
            //    (side-note: I know this is a bit problematic given upvars placement, etc).
            //
            // This is important, since the layout printing code always subtracts this discriminant
            // size from the variant size if the struct is "enum"-like, so failing to account for it
            // will either lead to numerical underflow, or an underreported variant size...
            //
            // However, if the discriminant is placed past the end of the variant, then we need
            // to factor in the size of the discriminant manually. This really should be refactored
            // better, but this "works" for now.
            if layout.fields.offset(tag_field) >= variant_size {
                variant_size += match tag_encoding {
                    TagEncoding::Direct => tag.size(cx),
                    _ => Size::ZERO,
                };
            }

            VariantInfo {
                name: Some(Symbol::intern(&ty::GeneratorSubsts::variant_name(variant_idx))),
                kind: SizeKind::Exact,
                size: variant_size.bytes(),
                align: variant_layout.align.abi.bytes(),
                fields,
            }
        })
        .collect();

    // The first three variants are hardcoded to be `UNRESUMED`, `RETURNED` and `POISONED`.
    // We will move the `RETURNED` and `POISONED` elements to the end so we
    // are left with a sorting order according to the generators yield points:
    // First `Unresumed`, then the `SuspendN` followed by `Returned` and `Panicked` (POISONED).
    let end_states = variant_infos.drain(1..=2);
    let end_states: Vec<_> = end_states.collect();
    variant_infos.extend(end_states);

    (
        variant_infos,
        // Only a directly-encoded tag occupies its own space.
        match tag_encoding {
            TagEncoding::Direct => Some(tag.size(cx)),
            _ => None,
        },
    )
}