1 use hir::def_id::DefId;
3 use rustc_index::bit_set::BitSet;
4 use rustc_index::vec::{Idx, IndexVec};
5 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
6 use rustc_middle::ty::layout::{
7 IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
9 use rustc_middle::ty::{
10 self, subst::SubstsRef, AdtDef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
12 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
13 use rustc_span::symbol::Symbol;
14 use rustc_span::DUMMY_SP;
15 use rustc_target::abi::*;
20 use crate::layout_sanity_check::sanity_check_layout;
22 pub fn provide(providers: &mut ty::query::Providers) {
23 *providers = ty::query::Providers { layout_of, ..*providers };
// Provider for the `layout_of` query: normalizes the queried type, computes
// its layout via `layout_of_uncached`, then records it for `-Zprint-type-sizes`
// and runs the layout sanity checks.
// NOTE(review): this listing is elided — the `fn layout_of(...)` header line
// (numbering jumps 26 -> 29), the successful `Ok(t) => t` arm of the match
// (41 -> 43), and several closing braces are missing; restore from upstream.
26 #[instrument(skip(tcx, query), level = "debug")]
29 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
30 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
31 let (param_env, ty) = query.into_parts();
// Layout is computed in the reveal-all environment.
34 let param_env = param_env.with_reveal_all_normalized(tcx);
35 let unnormalized_ty = ty;
37 // FIXME: We might want to have two different versions of `layout_of`:
38 // One that can be called after typecheck has completed and can use
39 // `normalize_erasing_regions` here and another one that can be called
40 // before typecheck has completed and uses `try_normalize_erasing_regions`.
41 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
43 Err(normalization_error) => {
// Normalization failure is reported as a distinct layout error variant.
44 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
48 if ty != unnormalized_ty {
49 // Ensure this layout is also cached for the normalized type.
50 return tcx.layout_of(param_env.and(ty));
53 let cx = LayoutCx { tcx, param_env };
55 let layout = layout_of_uncached(&cx, ty)?;
56 let layout = TyAndLayout { ty, layout };
58 record_layout_for_printing(&cx, layout);
60 sanity_check_layout(&cx, &layout);
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    // For each source position `i`, record it at the slot its image points to.
    for (i, &m) in map.iter().enumerate() {
        inverse[m as usize] = i as u32;
    }
    inverse
}
78 fn univariant_uninterned<'tcx>(
79 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
81 fields: &[TyAndLayout<'_>],
84 ) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
85 let dl = cx.data_layout();
87 if pack.is_some() && repr.align.is_some() {
88 cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
89 return Err(LayoutError::Unknown(ty));
92 cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
// Computes the layout of `ty` from scratch (no query cache), dispatching on
// `ty.kind()`: scalars, pointers, arrays/slices/str, closures/generators,
// SIMD ADTs, general ADTs, and error cases.
// NOTE(review): this listing is elided throughout — the content numbering
// jumps (e.g. 96 -> 98 drops the `ty: Ty<'tcx>` parameter, and many match-arm
// closers are missing). Restore the gaps from upstream before compiling.
95 fn layout_of_uncached<'tcx>(
96 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
98 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
100 let param_env = cx.param_env;
101 let dl = cx.data_layout();
// Helper: a scalar of the given primitive whose valid range is the full
// range of its size (asserted to fit in 128 bits).
102 let scalar_unit = |value: Primitive| {
103 let size = value.size(dl);
104 assert!(size.bits() <= 128);
105 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
// Helper: interned scalar layout for a primitive.
107 let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));
// Helper: interned single-variant aggregate layout.
109 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
110 Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
112 debug_assert!(!ty.has_non_region_infer());
114 Ok(match *ty.kind() {
// `bool` is a 1-byte scalar restricted to 0..=1 (gives a niche).
116 ty::Bool => tcx.intern_layout(LayoutS::scalar(
118 Scalar::Initialized {
119 value: Int(I8, false),
120 valid_range: WrappingRange { start: 0, end: 1 },
// `char` is a 4-byte scalar restricted to the Unicode scalar value range.
123 ty::Char => tcx.intern_layout(LayoutS::scalar(
125 Scalar::Initialized {
126 value: Int(I32, false),
127 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
130 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
131 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
132 ty::Float(fty) => scalar(match fty {
133 ty::FloatTy::F32 => F32,
134 ty::FloatTy::F64 => F64,
// NOTE(review): arm header missing here (135 -> 137) — presumably the
// function-pointer case; its pointer is non-null (start = 1).
137 let mut ptr = scalar_unit(Pointer);
138 ptr.valid_range_mut().start = 1;
139 tcx.intern_layout(LayoutS::scalar(cx, ptr))
143 ty::Never => tcx.intern_layout(cx.layout_of_never_type()),
145 // Potentially-wide pointers.
146 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
147 let mut data_ptr = scalar_unit(Pointer);
// References are non-null; raw pointers keep the full range.
148 if !ty.is_unsafe_ptr() {
149 data_ptr.valid_range_mut().start = 1;
152 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
// Sized pointee => thin pointer, just the data scalar.
153 if pointee.is_sized(tcx, param_env) {
154 return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
// Unsized pointee: pick metadata by the tail's kind (slice len or vtable).
157 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
158 let metadata = match unsized_part.kind() {
160 return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
162 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
164 let mut vtable = scalar_unit(Pointer);
165 vtable.valid_range_mut().start = 1;
168 _ => return Err(LayoutError::Unknown(unsized_part)),
171 // Effectively a (ptr, meta) tuple.
172 tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
// `dyn*`: a (data, vtable) scalar pair where data is pointer-sized.
175 ty::Dynamic(_, _, ty::DynStar) => {
176 let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
177 data.valid_range_mut().start = 0;
178 let mut vtable = scalar_unit(Pointer);
179 vtable.valid_range_mut().start = 1;
180 tcx.intern_layout(cx.scalar_pair(data, vtable))
183 // Arrays and slices.
184 ty::Array(element, mut count) => {
// The length const may still contain projections; normalize, and give up
// if it still does afterwards.
185 if count.has_projections() {
186 count = tcx.normalize_erasing_regions(param_env, count);
187 if count.has_projections() {
188 return Err(LayoutError::Unknown(ty));
192 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
193 let element = cx.layout_of(element)?;
194 let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
// Non-empty arrays of uninhabited elements are themselves uninhabited.
196 let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
199 Abi::Aggregate { sized: true }
// A zero-length array has no element to take a niche from.
202 let largest_niche = if count != 0 { element.largest_niche } else { None };
204 tcx.intern_layout(LayoutS {
205 variants: Variants::Single { index: VariantIdx::new(0) },
206 fields: FieldsShape::Array { stride: element.size, count },
209 align: element.align,
213 ty::Slice(element) => {
214 let element = cx.layout_of(element)?;
215 tcx.intern_layout(LayoutS {
216 variants: Variants::Single { index: VariantIdx::new(0) },
217 fields: FieldsShape::Array { stride: element.size, count: 0 },
218 abi: Abi::Aggregate { sized: false },
220 align: element.align,
// `str` is laid out as an unsized array of bytes (stride 1).
224 ty::Str => tcx.intern_layout(LayoutS {
225 variants: Variants::Single { index: VariantIdx::new(0) },
226 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
227 abi: Abi::Aggregate { sized: false },
// Zero-sized: fn items are unit-like.
234 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
// Extern types and `dyn Trait` are unit-like but unsized.
235 ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
236 let mut unit = univariant_uninterned(
240 &ReprOptions::default(),
241 StructKind::AlwaysSized,
244 Abi::Aggregate { ref mut sized } => *sized = false,
247 tcx.intern_layout(unit)
250 ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
// Closures lay out their captured upvars like a struct.
252 ty::Closure(_, ref substs) => {
253 let tys = substs.as_closure().upvar_tys();
255 &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
256 &ReprOptions::default(),
257 StructKind::AlwaysSized,
// NOTE(review): the tuple arm's header is elided here (258 -> 263);
// the last tuple element may be unsized, hence `MaybeUnsized`.
263 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
266 &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
267 &ReprOptions::default(),
272 // SIMD vector types.
273 ty::Adt(def, substs) if def.repr().simd() => {
274 if !def.is_struct() {
275 // Should have yielded E0517 by now.
276 tcx.sess.delay_span_bug(
278 "#[repr(simd)] was applied to an ADT that is not a struct",
280 return Err(LayoutError::Unknown(ty));
283 // Supported SIMD vectors are homogeneous ADTs with at least one field:
285 // * #[repr(simd)] struct S(T, T, T, T);
286 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
287 // * #[repr(simd)] struct S([T; 4])
289 // where T is a primitive scalar (integer/float/pointer).
291 // SIMD vectors with zero fields are not supported.
292 // (should be caught by typeck)
293 if def.non_enum_variant().fields.is_empty() {
294 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
297 // Type of the first ADT field:
298 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
300 // Heterogeneous SIMD vectors are not supported:
301 // (should be caught by typeck)
302 for fi in &def.non_enum_variant().fields {
303 if fi.ty(tcx, substs) != f0_ty {
304 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
308 // The element type and number of elements of the SIMD vector
309 // are obtained from:
311 // * the element type and length of the single array field, if
312 // the first field is of array type, or
314 // * the homogeneous field type and the number of fields.
315 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
316 // First ADT field is an array:
318 // SIMD vectors with multiple array fields are not supported:
319 // (should be caught by typeck)
320 if def.non_enum_variant().fields.len() != 1 {
321 tcx.sess.fatal(&format!(
322 "monomorphising SIMD type `{}` with more than one array field",
327 // Extract the number of elements from the layout of the array field:
328 let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
329 return Err(LayoutError::Unknown(ty));
332 (*e_ty, *count, true)
334 // First ADT field is not an array:
335 (f0_ty, def.non_enum_variant().fields.len() as _, false)
338 // SIMD vectors of zero length are not supported.
339 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
342 // Can't be caught in typeck if the array length is generic.
// NOTE(review): the `if e_len == 0` guard line appears elided (342 -> 344).
344 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
345 } else if e_len > MAX_SIMD_LANES {
346 tcx.sess.fatal(&format!(
347 "monomorphising SIMD type `{}` of length greater than {}",
352 // Compute the ABI of the element type:
353 let e_ly = cx.layout_of(e_ty)?;
354 let Abi::Scalar(e_abi) = e_ly.abi else {
355 // This error isn't caught in typeck, e.g., if
356 // the element type of the vector is generic.
357 tcx.sess.fatal(&format!(
358 "monomorphising SIMD type `{}` with a non-primitive-scalar \
359 (integer/float/pointer) element type `{}`",
364 // Compute the size and alignment of the vector:
365 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
366 let align = dl.vector_align(size);
367 let size = size.align_to(align.abi);
369 // Compute the placement of the vector fields:
370 let fields = if is_array {
371 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
373 FieldsShape::Array { stride: e_ly.size, count: e_len }
376 tcx.intern_layout(LayoutS {
377 variants: Variants::Single { index: VariantIdx::new(0) },
379 abi: Abi::Vector { element: e_abi, count: e_len },
380 largest_niche: e_ly.largest_niche,
// General ADTs (structs, enums, unions).
387 ty::Adt(def, substs) => {
388 // Cache the field layouts.
395 .map(|field| cx.layout_of(field.ty(tcx, substs)))
396 .collect::<Result<Vec<_>, _>>()
398 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Unions: same packed+aligned rejection as `univariant_uninterned`.
401 if def.repr().pack.is_some() && def.repr().align.is_some() {
402 cx.tcx.sess.delay_span_bug(
403 tcx.def_span(def.did()),
404 "union cannot be packed and aligned",
406 return Err(LayoutError::Unknown(ty));
409 return Ok(tcx.intern_layout(
410 cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
415 cx.layout_of_struct_or_enum(
419 def.is_unsafe_cell(),
420 tcx.layout_scalar_valid_range(def.did()),
421 |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
423 .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
426 def.repr().inhibit_enum_layout_opt()
430 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
432 let param_env = tcx.param_env(def.did());
434 && match def.variants().iter().next().and_then(|x| x.fields.last()) {
435 Some(last_field) => {
436 tcx.type_of(last_field.did).is_sized(tcx, param_env)
442 .ok_or(LayoutError::SizeOverflow(ty))?,
446 // Types with no meaningful known layout.
447 ty::Projection(_) | ty::Opaque(..) => {
448 // NOTE(eddyb) `layout_of` query should've normalized these away,
449 // if that was possible, so there's no reason to try again here.
450 return Err(LayoutError::Unknown(ty));
453 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
454 bug!("Layout::compute: unexpected type `{}`", ty)
457 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
458 return Err(LayoutError::Unknown(ty));
463 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
464 #[derive(Clone, Debug, PartialEq)]
465 enum SavedLocalEligibility {
467 Assigned(VariantIdx),
468 // FIXME: Use newtype_index so we aren't wasting bytes
469 Ineligible(Option<u32>),
472 // When laying out generators, we divide our saved local fields into two
473 // categories: overlap-eligible and overlap-ineligible.
475 // Those fields which are ineligible for overlap go in a "prefix" at the
476 // beginning of the layout, and always have space reserved for them.
478 // Overlap-eligible fields are only assigned to one variant, so we lay
479 // those fields out for each variant and put them right after the prefix.
482 // Finally, in the layout details, we point to the fields from the
483 // variants they are assigned to. It is possible for some fields to be
484 // included in multiple variants. No field ever "moves around" in the
485 // layout; its offset is always the same.
487 // Also included in the layout are the upvars and the discriminant.
488 // These are included as fields on the "outer" layout; they are not part
491 /// Compute the eligibility and assignment of each local.
// Returns (set of prefix-promoted locals, per-local eligibility/assignment).
// NOTE(review): listing is elided — several match arms and closing braces are
// missing between the numbered lines (e.g. 508 -> 510, 522 -> 529).
492 fn generator_saved_local_eligibility<'tcx>(
493 info: &GeneratorLayout<'tcx>,
494 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
495 use SavedLocalEligibility::*;
// Start every saved local as `Unassigned`.
497 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
498 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
500 // The saved locals not eligible for overlap. These will get
501 // "promoted" to the prefix of our generator.
502 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
504 // Figure out which of our saved locals are fields in only
505 // one variant. The rest are deemed ineligible for overlap.
506 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
507 for local in fields {
508 match assignments[*local] {
510 assignments[*local] = Assigned(variant_index);
513 // We've already seen this local at another suspension
514 // point, so it is no longer a candidate.
516 "removing local {:?} in >1 variant ({:?}, {:?})",
521 ineligible_locals.insert(*local);
522 assignments[*local] = Ineligible(None);
529 // Next, check every pair of eligible locals to see if they
531 for local_a in info.storage_conflicts.rows() {
532 let conflicts_a = info.storage_conflicts.count(local_a);
// Already-ineligible locals live in the prefix; nothing more to decide.
533 if ineligible_locals.contains(local_a) {
537 for local_b in info.storage_conflicts.iter(local_a) {
538 // local_a and local_b are storage live at the same time, therefore they
539 // cannot overlap in the generator layout. The only way to guarantee
540 // this is if they are in the same variant, or one is ineligible
541 // (which means it is stored in every variant).
542 if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
546 // If they conflict, we will choose one to make ineligible.
547 // This is not always optimal; it's just a greedy heuristic that
548 // seems to produce good results most of the time.
// Prefer to demote whichever local has more conflicts overall.
549 let conflicts_b = info.storage_conflicts.count(local_b);
550 let (remove, other) =
551 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
552 ineligible_locals.insert(remove);
553 assignments[remove] = Ineligible(None);
554 trace!("removing local {:?} due to conflict with {:?}", remove, other);
558 // Count the number of variants in use. If only one of them, then it is
559 // impossible to overlap any locals in our layout. In this case it's
560 // always better to make the remaining locals ineligible, so we can
561 // lay them out with the other locals in the prefix and eliminate
562 // unnecessary padding bytes.
564 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
565 for assignment in &assignments {
566 if let Assigned(idx) = assignment {
567 used_variants.insert(*idx);
570 if used_variants.count() < 2 {
571 for assignment in assignments.iter_mut() {
572 *assignment = Ineligible(None);
574 ineligible_locals.insert_all();
578 // Write down the order of our locals that will be promoted to the prefix.
580 for (idx, local) in ineligible_locals.iter().enumerate() {
581 assignments[local] = Ineligible(Some(idx as u32));
584 debug!("generator saved local assignments: {:?}", assignments);
586 (ineligible_locals, assignments)
589 /// Compute the full generator layout.
// Strategy: lay out a "prefix" (upvars + tag + promoted ineligible locals)
// shared by all variants, then lay out each variant's eligible fields after
// the prefix, stitching the memory indices back together at the end.
// NOTE(review): listing is elided — the `tcx`/`ty` parameter lines, several
// closure-argument lines to `univariant_uninterned`, and many closing braces
// are missing between the numbered lines; restore from upstream.
590 fn generator_layout<'tcx>(
591 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
593 def_id: hir::def_id::DefId,
594 substs: SubstsRef<'tcx>,
595 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
596 use SavedLocalEligibility::*;
// Substitute generator substs into a saved-local field type.
598 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
600 let Some(info) = tcx.generator_layout(def_id) else {
601 return Err(LayoutError::Unknown(ty));
603 let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
605 // Build a prefix layout, including "promoting" all ineligible
606 // locals as part of the prefix. We compute the layout of all of
607 // these fields at once to get optimal packing.
// The discriminant ("tag") field sits right after the upvars.
608 let tag_index = substs.as_generator().prefix_tys().count();
610 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
611 let max_discr = (info.variant_fields.len() - 1) as u128;
612 let discr_int = Integer::fit_unsigned(max_discr);
613 let discr_int_ty = discr_int.to_ty(tcx, false);
614 let tag = Scalar::Initialized {
615 value: Primitive::Int(discr_int, false),
616 valid_range: WrappingRange { start: 0, end: max_discr },
618 let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
619 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` since they may be dead in
// some variants.
621 let promoted_layouts = ineligible_locals
623 .map(|local| subst_field(info.field_tys[local]))
624 .map(|ty| tcx.mk_maybe_uninit(ty))
625 .map(|ty| cx.layout_of(ty));
// Prefix = upvars ++ [tag] ++ promoted locals.
626 let prefix_layouts = substs
629 .map(|ty| cx.layout_of(ty))
630 .chain(iter::once(Ok(tag_layout)))
631 .chain(promoted_layouts)
632 .collect::<Result<Vec<_>, _>>()?;
633 let prefix = univariant_uninterned(
637 &ReprOptions::default(),
638 StructKind::AlwaysSized,
641 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
643 // Split the prefix layout into the "outer" fields (upvars and
644 // discriminant) and the "promoted" fields. Promoted fields will
645 // get included in each variant that requested them in
647 debug!("prefix = {:#?}", prefix);
648 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
649 FieldsShape::Arbitrary { mut offsets, memory_index } => {
650 let mut inverse_memory_index = invert_mapping(&memory_index);
652 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
653 // "outer" and "promoted" fields respectively.
654 let b_start = (tag_index + 1) as u32;
655 let offsets_b = offsets.split_off(b_start as usize);
656 let offsets_a = offsets;
658 // Disentangle the "a" and "b" components of `inverse_memory_index`
659 // by preserving the order but keeping only one disjoint "half" each.
660 // FIXME(eddyb) build a better abstraction for permutations, if possible.
661 let inverse_memory_index_b: Vec<_> =
662 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
663 inverse_memory_index.retain(|&i| i < b_start);
664 let inverse_memory_index_a = inverse_memory_index;
666 // Since `inverse_memory_index_{a,b}` each only refer to their
667 // respective fields, they can be safely inverted
668 let memory_index_a = invert_mapping(&inverse_memory_index_a);
669 let memory_index_b = invert_mapping(&inverse_memory_index_b);
672 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
673 (outer_fields, offsets_b, memory_index_b)
// Running size/align, grown below to cover the largest variant.
678 let mut size = prefix.size;
679 let mut align = prefix.align;
// NOTE(review): the `let variants = info.variant_fields...` head appears
// elided here (679 -> 683).
683 .map(|(index, variant_fields)| {
684 // Only include overlap-eligible fields when we compute our variant layout.
685 let variant_only_tys = variant_fields
687 .filter(|local| match assignments[**local] {
688 Unassigned => bug!(),
689 Assigned(v) if v == index => true,
690 Assigned(_) => bug!("assignment does not match variant"),
691 Ineligible(_) => false,
693 .map(|local| subst_field(info.field_tys[*local]));
695 let mut variant = univariant_uninterned(
698 &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
699 &ReprOptions::default(),
// Variant fields are placed after the shared prefix.
700 StructKind::Prefixed(prefix_size, prefix_align.abi),
702 variant.variants = Variants::Single { index };
704 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
708 // Now, stitch the promoted and variant-only fields back together in
709 // the order they are mentioned by our GeneratorLayout.
710 // Because we only use some subset (that can differ between variants)
711 // of the promoted fields, we can't just pick those elements of the
712 // `promoted_memory_index` (as we'd end up with gaps).
713 // So instead, we build an "inverse memory_index", as if all of the
714 // promoted fields were being used, but leave the elements not in the
715 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
716 // obtain a valid (bijective) mapping.
717 const INVALID_FIELD_IDX: u32 = !0;
718 let mut combined_inverse_memory_index =
719 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
720 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
721 let combined_offsets = variant_fields
725 let (offset, memory_index) = match assignments[*local] {
726 Unassigned => bug!(),
728 let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
// Variant-only fields come after all promoted fields in memory order.
729 (offset, promoted_memory_index.len() as u32 + memory_index)
731 Ineligible(field_idx) => {
732 let field_idx = field_idx.unwrap() as usize;
733 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
736 combined_inverse_memory_index[memory_index as usize] = i as u32;
741 // Remove the unused slots and invert the mapping to obtain the
742 // combined `memory_index` (also see previous comment).
743 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
744 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
746 variant.fields = FieldsShape::Arbitrary {
747 offsets: combined_offsets,
748 memory_index: combined_memory_index,
751 size = size.max(variant.size);
752 align = align.max(variant.align);
755 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
757 size = size.align_to(align.abi);
// Uninhabited iff the prefix or every variant is uninhabited.
759 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
762 Abi::Aggregate { sized: true }
765 let layout = tcx.intern_layout(LayoutS {
766 variants: Variants::Multiple {
768 tag_encoding: TagEncoding::Direct,
769 tag_field: tag_index,
772 fields: outer_fields,
774 largest_niche: prefix.largest_niche,
778 debug!("generator layout ({:?}): {:#?}", ty, layout);
782 /// This is invoked by the `layout_of` query to record the final
783 /// layout of each type.
785 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
786 // If we are running with `-Zprint-type-sizes`, maybe record layouts
787 // for dumping later.
788 if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
789 record_layout_for_printing_outlined(cx, layout)
// Slow path of `record_layout_for_printing`: builds per-variant size info and
// pushes it into `sess.code_stats` for `-Zprint-type-sizes` output.
// NOTE(review): listing is elided — the `record_type_size(...)` argument list
// (807 -> 818) and the closure-variant match arm headers are missing.
793 fn record_layout_for_printing_outlined<'tcx>(
794 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
795 layout: TyAndLayout<'tcx>,
797 // Ignore layouts that are done with non-empty environments or
798 // non-monomorphic layouts, as the user only wants to see the stuff
799 // resulting from the final codegen session.
800 if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
804 // (delay format until we actually need it)
805 let record = |kind, packed, opt_discr_size, variants| {
806 let type_desc = format!("{:?}", layout.ty);
807 cx.tcx.sess.code_stats.record_type_size(
818 match *layout.ty.kind() {
819 ty::Adt(adt_def, _) => {
820 debug!("print-type-size t: `{:?}` process adt", layout.ty);
821 let adt_kind = adt_def.adt_kind();
822 let adt_packed = adt_def.repr().pack.is_some();
823 let (variant_infos, opt_discr_size) = variant_info_for_adt(cx, layout, adt_def);
824 record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos);
827 ty::Generator(def_id, substs, _) => {
828 debug!("print-type-size t: `{:?}` record generator", layout.ty);
829 // Generators always have a begin/poisoned/end state with additional suspend points
830 let (variant_infos, opt_discr_size) =
831 variant_info_for_generator(cx, layout, def_id, substs);
832 record(DataTypeKind::Generator, false, opt_discr_size, variant_infos);
// Closures are recorded without variant details.
836 debug!("print-type-size t: `{:?}` record closure", layout.ty);
837 record(DataTypeKind::Closure, false, None, vec![]);
// All other type kinds are skipped — only nominal types are reported.
841 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
// Builds the `-Zprint-type-sizes` per-variant field breakdown for an ADT.
// Returns the variant infos plus the discriminant size for directly-tagged
// multi-variant layouts (`None` for niche-encoded or single-variant ones).
// NOTE(review): listing is elided — closure internals (853 -> 857), struct
// literal field names (859 -> 862), and match-arm closers are missing.
846 fn variant_info_for_adt<'tcx>(
847 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
848 layout: TyAndLayout<'tcx>,
849 adt_def: AdtDef<'tcx>,
850 ) -> (Vec<VariantInfo>, Option<Size>) {
// Helper: per-field offsets/sizes for one variant, tracking the minimal
// span (`min_size`) covered by the fields.
851 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
852 let mut min_size = Size::ZERO;
853 let field_info: Vec<_> = flds
857 let field_layout = layout.field(cx, i);
858 let offset = layout.fields.offset(i);
// A variant is as large as the furthest field end seen so far.
859 min_size = min_size.max(offset + field_layout.size);
862 offset: offset.bytes(),
863 size: field_layout.size.bytes(),
864 align: field_layout.align.abi.bytes(),
871 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
872 align: layout.align.abi.bytes(),
// Fall back to the full layout size when no field advanced `min_size`.
873 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
878 match layout.variants {
879 Variants::Single { index } => {
880 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
881 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
882 let variant_def = &adt_def.variant(index);
883 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
884 (vec![build_variant_info(Some(variant_def.name), &fields, layout)], None)
890 Variants::Multiple { tag, ref tag_encoding, .. } => {
892 "print-type-size `{:#?}` adt general variants def {}",
894 adt_def.variants().len()
896 let variant_infos: Vec<_> = adt_def
899 .map(|(i, variant_def)| {
900 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
901 build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
// Only a direct tag contributes a reportable discriminant size.
908 TagEncoding::Direct => Some(tag.size(cx)),
916 fn variant_info_for_generator<'tcx>(
917 cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
918 layout: TyAndLayout<'tcx>,
920 substs: ty::SubstsRef<'tcx>,
921 ) -> (Vec<VariantInfo>, Option<Size>) {
922 let Variants::Multiple { tag, ref tag_encoding, .. } = layout.variants else {
923 return (vec![], None);
926 let (generator, state_specific_names) = cx.tcx.generator_layout_and_saved_local_names(def_id);
927 let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
929 let mut upvars_size = Size::ZERO;
930 let upvar_fields: Vec<_> = substs
935 .map(|(field_idx, (_, name))| {
936 let field_layout = layout.field(cx, field_idx);
937 let offset = layout.fields.offset(field_idx);
938 upvars_size = upvars_size.max(offset + field_layout.size);
940 name: Symbol::intern(&name),
941 offset: offset.bytes(),
942 size: field_layout.size.bytes(),
943 align: field_layout.align.abi.bytes(),
948 let variant_infos: Vec<_> = generator
951 .map(|(variant_idx, variant_def)| {
952 let variant_layout = layout.for_variant(cx, variant_idx);
953 let mut variant_size = Size::ZERO;
954 let fields = variant_def
957 .map(|(field_idx, local)| {
958 let field_layout = variant_layout.field(cx, field_idx);
959 let offset = variant_layout.fields.offset(field_idx);
960 // The struct is as large as the last field's end
961 variant_size = variant_size.max(offset + field_layout.size);
963 name: state_specific_names.get(*local).copied().flatten().unwrap_or(
964 Symbol::intern(&format!(".generator_field{}", local.as_usize())),
966 offset: offset.bytes(),
967 size: field_layout.size.bytes(),
968 align: field_layout.align.abi.bytes(),
971 .chain(upvar_fields.iter().copied())
974 // If the variant has no state-specific fields, then it's the size of the upvars.
975 if variant_size == Size::ZERO {
976 variant_size = upvars_size;
978 // We need to add the discriminant size back into min_size, since it is subtracted
979 // later during printing.
980 variant_size += match tag_encoding {
981 TagEncoding::Direct => tag.size(cx),
986 name: Some(Symbol::intern(&ty::GeneratorSubsts::variant_name(variant_idx))),
987 kind: SizeKind::Exact,
988 size: variant_size.bytes(),
989 align: variant_layout.align.abi.bytes(),
997 TagEncoding::Direct => Some(tag.size(cx)),