use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::ty::layout::{
    IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
    self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;

use std::cmp::{self, Ordering};
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

use crate::layout_sanity_check::sanity_check_layout;
pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    record_layout_for_printing(&cx, layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
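// E.g. `invert_mapping(&[2, 0, 1])` is `[1, 2, 0]`: `map[0] == 2` puts
// `inverse[2] = 0`, and applying the function twice returns the original.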
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
    let dl = cx.data_layout();
    let b_align = b.align(dl);
    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
    let b_offset = a.size(dl).align_to(b_align.abi);
    let size = (b_offset + b.size(dl)).align_to(align.abi);
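    // E.g. for a 4-byte `a` and an 8-byte `b` with 8-byte alignment:
    // `b_offset` is 4 rounded up to 8, and `size` is (8 + 8) aligned to 8,
    // i.e. 16, leaving 4 bytes of padding after `a`.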
    // HACK(nox): We iter on `b` and then `a` because `max_by_key`
    // returns the last maximum.
    let largest_niche = Niche::from_scalar(dl, b_offset, b)
        .into_iter()
        .chain(Niche::from_scalar(dl, Size::ZERO, a))
        .max_by_key(|niche| niche.available(dl));

    LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: vec![Size::ZERO, b_offset],
            memory_index: vec![0, 1],
        },
        abi: Abi::ScalarPair(a, b),
        largest_niche,
        align,
        size,
    }
}
fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }

    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index[..end];
        let effective_field_align = |f: &TyAndLayout<'_>| {
            if let Some(pack) = pack {
                f.align.abi.min(pack).bytes()
            } else {
                // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
                f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
            }
        };
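        // E.g. `[u8; 4]` has align 1 and size 4, so its effective alignment is
        // `max(1, 4).trailing_zeros() == 2`, the same class as an align-4 field.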
        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee.
        if repr.can_randomize_type_layout() {
            // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use to
            // randomize field ordering with.
            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

            // Shuffle the ordering of the fields.
            optimizing.shuffle(&mut rng);

        // Otherwise we just leave things alone and actually optimize the type's fields
        } else {
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(effective_field_align(f)))
                    });
                }

                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| effective_field_align(&fields[x as usize]));
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            //                 regardless of the status of `-Z randomize-layout`
        }
    }
    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_mapping`).

    let mut sized = true;
    let mut offsets = vec![Size::ZERO; fields.len()];
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;

    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }

    for &i in &inverse_memory_index {
        let field = fields[i as usize];
        if !sized {
            cx.tcx.sess.delay_span_bug(
                DUMMY_SP,
                &format!(
                    "univariant: field #{} of `{}` comes after unsized field",
                    offsets.len(),
                    ty
                ),
            );
        }

        if field.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align.min(AbiAndPrefAlign::new(pack))
        } else {
            field.align
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i as usize] = offset;

        if let Some(mut niche) = field.largest_niche {
            let available = niche.available(dl);
            if available > largest_niche_available {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
    }

    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }

    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;

    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
    // Field 5 would be the first element, so memory_index is i:
    // Note: if we didn't optimize, it's already right.

    let memory_index =
        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
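    // E.g. if field 2 was placed first in memory (`inverse_memory_index ==
    // [2, 0, 1]`), then `memory_index == [1, 2, 0]`: field 0 ends up in memory
    // slot 1, field 1 in slot 2, and field 2 in slot 0.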
    let size = min_size.align_to(align.abi);
    let mut abi = Abi::Aggregate { sized };

    // Unpack newtype ABIs and find scalar pairs.
    if sized && size.bytes() > 0 {
        // All other fields must be ZSTs.
        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
                    match field.abi {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi;
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi;
                        }
                        _ => {}
                    }
                }
            }

            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi, b.abi) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = scalar_pair(cx, a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }

    if fields.iter().any(|f| f.abi.is_uninhabited()) {
        abi = Abi::Uninhabited;
    }

    Ok(LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
    })
}
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            let mut ptr = scalar_unit(Pointer);
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),
        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            if !ty.is_unsafe_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
            let metadata = match unsized_part.kind() {
                ty::Foreign(..) => {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }
                ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                ty::Dynamic(..) => {
                    let mut vtable = scalar_unit(Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => return Err(LayoutError::Unknown(unsized_part)),
            };

            // Effectively a (ptr, meta) tuple.
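            // E.g. `&[u8]` pairs the data pointer with a `usize` length, and
            // `&dyn Trait` pairs it with a non-null vtable pointer; sized
            // pointees already returned above as thin pointers.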
            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(scalar_pair(cx, data, vtable))
        }
        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
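        // E.g. `[u16; 4]` above has stride 2, align 2 and size 8, while the
        // slice type `[u16]` below keeps the element stride but uses count 0
        // and an unsized aggregate ABI.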
        ty::Slice(element) => {
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),
        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
        ty::Closure(_, ref substs) => {
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }
        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            //   the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }
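        // E.g. `#[repr(simd)] struct S([f32; 4])` above becomes four f32 lanes
        // with `Abi::Vector`, its alignment coming from the target's
        // vector-alignment rule (`dl.vector_align`) rather than from `f32`.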
        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                let mut align =
                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                if let Some(repr_align) = def.repr().align {
                    align = align.max(AbiAndPrefAlign::new(repr_align));
                }

                let optimize = !def.repr().inhibit_union_abi_opt();
                let mut size = Size::ZERO;
                let mut abi = Abi::Aggregate { sized: true };
                let index = VariantIdx::new(0);
                for field in &variants[index] {
                    assert!(field.is_sized());
                    align = align.max(field.align);

                    // If all non-ZST fields have the same ABI, forward this ABI
                    if optimize && !field.is_zst() {
                        // Discard valid range information and allow undef
                        let field_abi = match field.abi {
                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                            Abi::Vector { element: x, count } => {
                                Abi::Vector { element: x.to_union(), count }
                            }
                            Abi::Uninhabited | Abi::Aggregate { .. } => {
                                Abi::Aggregate { sized: true }
                            }
                        };

                        if size == Size::ZERO {
                            // first non ZST: initialize 'abi'
                            abi = field_abi;
                        } else if abi != field_abi {
                            // different fields have different ABI: reset to Aggregate
                            abi = Abi::Aggregate { sized: true };
                        }
                    }

                    size = cmp::max(size, field.size);
                }

                if let Some(pack) = def.repr().pack {
                    align = align.min(AbiAndPrefAlign::new(pack));
                }

                return Ok(tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index },
                    fields: FieldsShape::Union(
                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
                    ),
                    abi,
                    largest_niche: None,
                    align,
                    size: size.align_to(align.abi),
                }));
            }
            // A variant is absent if it's uninhabited and only has ZST fields.
            // Present uninhabited variants only require space for their fields,
            // but *not* an encoding of the discriminant (e.g., a tag value).
            // See issue #49298 for more details on the need to leave space
            // for non-ZST uninhabited data (mostly partial initialization).
            let absent = |fields: &[TyAndLayout<'_>]| {
                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                let is_zst = fields.iter().all(|f| f.is_zst());
                uninhabited && is_zst
            };
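            // E.g. a variant whose only field is `!` is absent, but a variant
            // holding `(u32, !)` is uninhabited yet still present: the `u32`
            // needs space because it may be partially initialized.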
            let (present_first, present_second) = {
                let mut present_variants = variants
                    .iter_enumerated()
                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                (present_variants.next(), present_variants.next())
            };
            let present_first = match present_first {
                Some(present_first) => present_first,
                // Uninhabited because it has no variants, or only absent ones.
                None if def.is_enum() => {
                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                }
                // If it's a struct, still compute a layout so that we can still compute the
                // field offsets.
                None => VariantIdx::new(0),
            };

            let is_struct = !def.is_enum() ||
                // Only one variant is present.
                (present_second.is_none() &&
                // Representation optimizations are allowed.
                !def.repr().inhibit_enum_layout_opt());
            if is_struct {
                // Struct, or univariant enum equivalent to a struct.
                // (Typechecking will reject discriminant-sizing attrs.)

                let v = present_first;
                let kind = if def.is_enum() || variants[v].is_empty() {
                    StructKind::AlwaysSized
                } else {
                    let param_env = tcx.param_env(def.did());
                    let last_field = def.variant(v).fields.last().unwrap();
                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
                };

                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
                st.variants = Variants::Single { index: v };

                if def.is_unsafe_cell() {
                    let hide_niches = |scalar: &mut _| match scalar {
                        Scalar::Initialized { value, valid_range } => {
                            *valid_range = WrappingRange::full(value.size(dl))
                        }
                        // Already doesn't have any niches
                        Scalar::Union { .. } => {}
                    };
                    match &mut st.abi {
                        Abi::Uninhabited => {}
                        Abi::Scalar(scalar) => hide_niches(scalar),
                        Abi::ScalarPair(a, b) => {
                            hide_niches(a);
                            hide_niches(b);
                        }
                        Abi::Vector { element, count: _ } => hide_niches(element),
                        Abi::Aggregate { sized: _ } => {}
                    }
                    st.largest_niche = None;
                    return Ok(tcx.intern_layout(st));
                }
                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
                match st.abi {
                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                        // The asserts ensure that we are not using the
                        // `#[rustc_layout_scalar_valid_range(n)]` attribute
                        // to widen the range of anything, as that would
                        // probably result in UB somewhere.
                        // FIXME(eddyb) the asserts are probably not needed,
                        // as larger validity ranges would result in missed
                        // optimizations, *not* wrongly assuming the inner
                        // value is valid. e.g. unions enlarge validity ranges,
                        // because the values may be uninitialized.
                        if let Bound::Included(start) = start {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.start <= start);
                            valid_range.start = start;
                        }
                        if let Bound::Included(end) = end {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.end >= end);
                            valid_range.end = end;
                        }

                        // Update `largest_niche` if we have introduced a larger niche.
                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                        if let Some(niche) = niche {
                            match st.largest_niche {
                                Some(largest_niche) => {
                                    // Replace the existing niche even if they're equal,
                                    // because this one is at a lower offset.
                                    if largest_niche.available(dl) <= niche.available(dl) {
                                        st.largest_niche = Some(niche);
                                    }
                                }
                                None => st.largest_niche = Some(niche),
                            }
                        }
                    }
                    _ => assert!(
                        start == Bound::Unbounded && end == Bound::Unbounded,
                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                        def,
                        st,
                    ),
                }

                return Ok(tcx.intern_layout(st));
            }
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(def.is_enum());

            // Until we've decided whether to use the tagged or
            // niche filling LayoutS, we don't want to intern the
            // variant layouts, so we can't store them in the
            // overall LayoutS. Store the overall LayoutS
            // and the variant LayoutSs here until then.
            struct TmpLayout<'tcx> {
                layout: LayoutS<'tcx>,
                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
            }
            let calculate_niche_filling_layout =
                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                    // The current code for niche-filling relies on variant indices
                    // instead of actual discriminants, so enums with
                    // explicit discriminants (RFC #2363) would misbehave.
                    if def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                    {
                        return Ok(None);
                    }

                    if variants.len() < 2 {
                        return Ok(None);
                    }

                    let mut align = dl.aggregate_align;
                    let mut variant_layouts = variants
                        .iter_enumerated()
                        .map(|(j, v)| {
                            let mut st = univariant_uninterned(
                                cx,
                                ty,
                                v,
                                &def.repr(),
                                StructKind::AlwaysSized,
                            )?;
                            st.variants = Variants::Single { index: j };

                            align = align.max(st.align);

                            Ok(st)
                        })
                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                    let largest_variant_index = match variant_layouts
                        .iter_enumerated()
                        .max_by_key(|(_i, layout)| layout.size.bytes())
                        .map(|(i, _layout)| i)
                    {
                        None => return Ok(None),
                        Some(i) => i,
                    };

                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                    let needs_disc = |index: VariantIdx| {
                        index != largest_variant_index && !absent(&variants[index])
                    };
                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                    let count = niche_variants.size_hint().1.unwrap() as u128;
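                    // E.g. for `Option<&T>` the largest variant is `Some`, and
                    // `count == 1`: only `None` needs an encoding, which fits
                    // in the pointer's null niche.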
                    // Find the field with the largest niche
                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
                        [largest_variant_index]
                        .iter()
                        .enumerate()
                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                        .max_by_key(|(_, niche)| niche.available(dl))
                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
                    {
                        None => return Ok(None),
                        Some(x) => x,
                    };

                    let niche_offset = niche.offset
                        + variant_layouts[largest_variant_index].fields.offset(field_index);
                    let niche_size = niche.value.size(dl);
                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

                    let all_variants_fit =
                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                            if i == largest_variant_index {
                                return true;
                            }

                            layout.largest_niche = None;

                            if layout.size <= niche_offset {
                                // This variant will fit before the niche.
                                return true;
                            }

                            // Determine if it'll fit after the niche.
                            let this_align = layout.align.abi;
                            let this_offset = (niche_offset + niche_size).align_to(this_align);

                            if this_offset + layout.size > size {
                                return false;
                            }

                            // It'll fit, but we need to make some adjustments.
                            match layout.fields {
                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                    for (j, offset) in offsets.iter_mut().enumerate() {
                                        if !variants[i][j].is_zst() {
                                            *offset += this_offset;
                                        }
                                    }
                                }
                                _ => {
                                    panic!("Layout of fields should be Arbitrary for variants")
                                }
                            }

                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
                            if !layout.abi.is_uninhabited() {
                                layout.abi = Abi::Aggregate { sized: true };
                            }
                            layout.size += this_offset;

                            true
                        });

                    if !all_variants_fit {
                        return Ok(None);
                    }

                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

                    let others_zst = variant_layouts
                        .iter_enumerated()
                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
                    let same_size = size == variant_layouts[largest_variant_index].size;
                    let same_align = align == variant_layouts[largest_variant_index].align;

                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                        Abi::Uninhabited
                    } else if same_size && same_align && others_zst {
                        match variant_layouts[largest_variant_index].abi {
                            // When the total alignment and size match, we can use the
                            // same ABI as the scalar variant with the reserved niche.
                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                            Abi::ScalarPair(first, second) => {
                                // Only the niche is guaranteed to be initialised,
                                // so use union layouts for the other primitive.
                                if niche_offset == Size::ZERO {
                                    Abi::ScalarPair(niche_scalar, second.to_union())
                                } else {
                                    Abi::ScalarPair(first.to_union(), niche_scalar)
                                }
                            }
                            _ => Abi::Aggregate { sized: true },
                        }
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                    let layout = LayoutS {
                        variants: Variants::Multiple {
                            tag: niche_scalar,
                            tag_encoding: TagEncoding::Niche {
                                untagged_variant: largest_variant_index,
                                niche_variants,
                                niche_start,
                            },
                            tag_field: 0,
                            variants: IndexVec::new(),
                        },
                        fields: FieldsShape::Arbitrary {
                            offsets: vec![niche_offset],
                            memory_index: vec![0],
                        },
                        abi,
                        largest_niche,
                        size,
                        align,
                    };

                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
                };
            let niche_filling_layout = calculate_niche_filling_layout()?;

            let (mut min, mut max) = (i128::MAX, i128::MIN);
            let discr_type = def.repr().discr_type();
            let bits = Integer::from_attr(cx, discr_type).size().bits();
            for (i, discr) in def.discriminants(tcx) {
                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                    continue;
                }
                let mut x = discr.val as i128;
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    x = (x << (128 - bits)) >> (128 - bits);
                }
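                // E.g. with an 8-bit signed discriminant, a raw value of 0xFF
                // is shifted left by 120 and arithmetically back right by 120,
                // yielding -1 as an i128.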
                if x < min {
                    min = x;
                }
                if x > max {
                    max = x;
                }
            }

            // We might have no inhabited variants, so pretend there's at least one.
            if (min, max) == (i128::MAX, i128::MIN) {
                min = 0;
                max = 0;
            }
            assert!(min <= max, "discriminant range is {}...{}", min, max);
            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
            let mut align = dl.aggregate_align;
            let mut size = Size::ZERO;

            // We're interested in the smallest alignment, so start large.
            let mut start_align = Align::from_bytes(256).unwrap();
            assert_eq!(Integer::for_align(dl, start_align), None);

            // repr(C) on an enum tells us to make a (tag, union) layout,
            // so we need to grow the prefix alignment to be at least
            // the alignment of the union. (This value is used both for
            // determining the alignment of the overall enum, and for
            // determining the alignment of the payload after the tag.)
            let mut prefix_align = min_ity.align(dl).abi;
            if def.repr().c() {
                for fields in &variants {
                    for field in fields {
                        prefix_align = prefix_align.max(field.align.abi);
                    }
                }
            }
            // Create the set of structs that represent each variant.
            let mut layout_variants = variants
                .iter_enumerated()
                .map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(
                        cx,
                        ty,
                        &field_layouts,
                        &def.repr(),
                        StructKind::Prefixed(min_ity.size(), prefix_align),
                    )?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            // Align the maximum variant size to the largest alignment.
            size = size.align_to(align.abi);

            if size.bytes() >= dl.obj_size_bound() {
                return Err(LayoutError::SizeOverflow(ty));
            }
            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
            if typeck_ity < min_ity {
                // It is a bug if Layout decided on a greater discriminant size than typeck
                // at this point (based on the values the discriminant can take on), mostly
                // because this discriminant will be loaded and then stored into a variable
                // of the type calculated by typeck. Consider such a case (a bug): typeck
                // decided on a byte-sized discriminant, but layout thinks we need a 16-bit
                // integer to store all the discriminant values. Then, in codegen, storing
                // this 16-bit discriminant into an 8-bit sized temporary would have to
                // discard some of the space necessary to represent it (or layout is wrong
                // about needing 16 bits).
                bug!(
                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                    min_ity,
                    typeck_ity
                );
                // However, it is fine to make the discriminant type however large (as an
                // optimisation) after this point - we'll just truncate the value we load
                // in codegen.
            }
            // Check to see if we should use a different type for the
            // discriminant. We can safely use a type with the same size
            // as the alignment of the first field of each variant.
            // We increase the size of the discriminant to avoid LLVM copying
            // padding when it doesn't need to. This normally causes unaligned
            // load/stores and excessive memcpy/memset operations. By using a
            // bigger integer size, LLVM can be sure about its contents and
            // won't be so conservative.

            // Use the initial field alignment
            let mut ity = if def.repr().c() || def.repr().int.is_some() {
                min_ity
            } else {
                Integer::for_align(dl, start_align).unwrap_or(min_ity)
            };

            // If the alignment is not larger than the chosen discriminant size,
            // don't use the alignment as the final size.
            if ity <= min_ity {
                ity = min_ity;
            } else {
                // Patch up the variants' first few fields.
                let old_ity_size = min_ity.size();
                let new_ity_size = ity.size();
                for variant in &mut layout_variants {
                    match variant.fields {
                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
                            for i in offsets {
                                if *i <= old_ity_size {
                                    assert_eq!(*i, old_ity_size);
                                    *i = new_ity_size;
                                }
                            }
                            // We might be making the struct larger.
                            if variant.size <= old_ity_size {
                                variant.size = new_ity_size;
                            }
                        }
                        _ => bug!(),
                    }
                }
            }
            let tag_mask = ity.size().unsigned_int_max();
            let tag = Scalar::Initialized {
                value: Int(ity, signed),
                valid_range: WrappingRange {
                    start: (min as u128 & tag_mask),
                    end: (max as u128 & tag_mask),
                },
            };
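            // E.g. an I8 tag for discriminants -1..=1 gets the wrapping range
            // 0xFF..=0x01; `WrappingRange` is allowed to wrap around the end
            // of the integer, so this is still a valid range.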
            let mut abi = Abi::Aggregate { sized: true };

            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            } else if tag.size(dl) == size {
                // Make sure we only use scalar layout when the enum is entirely its
                // own tag (i.e. it has no padding nor any non-ZST variant fields).
                abi = Abi::Scalar(tag);
            } else {
                // Try to use a ScalarPair for all tagged enums.
                let mut common_prim = None;
                let mut common_prim_initialized_in_all_variants = true;
                for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
                    let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                        bug!();
                    };
                    let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                    let (field, offset) = match (fields.next(), fields.next()) {
                        (None, None) => {
                            common_prim_initialized_in_all_variants = false;
                            continue;
                        }
                        (Some(pair), None) => pair,
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    let prim = match field.abi {
                        Abi::Scalar(scalar) => {
                            common_prim_initialized_in_all_variants &=
                                matches!(scalar, Scalar::Initialized { .. });
                            scalar.primitive()
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    if let Some(pair) = common_prim {
                        // This is pretty conservative. We could go fancier
                        // by conflating things like i32 and u32, or even
                        // realising that (u8, u8) could just cohabit with
                        // u16 or even u32.
                        if pair != (prim, offset) {
                            common_prim = None;
                            break;
                        }
                    } else {
                        common_prim = Some((prim, offset));
                    }
                }
                if let Some((prim, offset)) = common_prim {
                    let prim_scalar = if common_prim_initialized_in_all_variants {
                        scalar_unit(prim)
                    } else {
                        // Common prim might be uninit.
                        Scalar::Union { value: prim }
                    };
                    let pair = scalar_pair(cx, tag, prim_scalar);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if pair_offsets[0] == Size::ZERO
                        && pair_offsets[1] == *offset
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }
            }
            // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
            // variants to ensure they are consistent. This is because a downcast is
            // semantically a NOP, and thus should not affect layout.
            if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
                for variant in &mut layout_variants {
                    // We only do this for variants with fields; the others are not accessed anyway.
                    // Also do not overwrite any already existing "clever" ABIs.
                    if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                        variant.abi = abi;
                        // Also need to bump up the size and alignment, so that the entire value fits.
                        variant.size = cmp::max(variant.size, size);
                        variant.align.abi = cmp::max(variant.align.abi, align.abi);
                    }
                }
            }

            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

            let tagged_layout = LayoutS {
                variants: Variants::Multiple {
                    tag,
                    tag_encoding: TagEncoding::Direct,
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
                largest_niche,
                abi,
                align,
                size,
            };

            let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
            let mut best_layout = match (tagged_layout, niche_filling_layout) {
                (tl, Some(nl)) => {
                    // Pick the smaller layout; otherwise,
                    // pick the layout with the larger niche; otherwise,
                    // pick tagged as it has simpler codegen.
                    use Ordering::*;
                    let niche_size = |tmp_l: &TmpLayout<'_>| {
                        tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                    };
                    match (
                        tl.layout.size.cmp(&nl.layout.size),
                        niche_size(&tl).cmp(&niche_size(&nl)),
                    ) {
                        (Greater, _) => nl,
                        (Equal, Less) => nl,
                        _ => tl,
                    }
                }
                (tl, None) => tl,
            };

            // Now we can intern the variant layouts and store them in the enum layout.
            best_layout.layout.variants = match best_layout.layout.variants {
                Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
                    tag,
                    tag_encoding,
                    tag_field,
                    variants: best_layout
                        .variants
                        .into_iter()
                        .map(|layout| tcx.intern_layout(layout))
                        .collect(),
                },
                _ => bug!(),
            };

            tcx.intern_layout(best_layout.layout)
        }
        // Types with no meaningful known layout.
        ty::Projection(_) | ty::Opaque(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}
// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
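//
// E.g. a local saved across two different suspension points is ineligible and
// lands in the prefix, while a local saved at exactly one suspension point can
// share its offset range with locals assigned to the other variants.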
/// Compute the eligibility and assignment of each local.
fn generator_saved_local_eligibility<'tcx>(
    info: &GeneratorLayout<'tcx>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }
    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b]
            {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }
    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
/// Compute the full generator layout.
fn generator_layout<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    substs: SubstsRef<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx;
    let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);

    let Some(info) = tcx.generator_layout(def_id) else {
        return Err(LayoutError::Unknown(ty));
    };
    let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = substs.as_generator().prefix_tys().count();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let discr_int_ty = discr_int.to_ty(tcx, false);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
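    // E.g. a generator layout with three variants has `max_discr == 2`, so
    // `Integer::fit_unsigned` picks an 8-bit tag valid in `0..=2`.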
    let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
    let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

    let promoted_layouts = ineligible_locals
        .iter()
        .map(|local| subst_field(info.field_tys[local]))
        .map(|ty| tcx.mk_maybe_uninit(ty))
        .map(|ty| cx.layout_of(ty));
    let prefix_layouts = substs
        .as_generator()
        .prefix_tys()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .collect::<Result<Vec<_>, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // GeneratorLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = invert_mapping(&memory_index);

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = (tag_index + 1) as u32;
            let offsets_b = offsets.split_off(b_start as usize);
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: Vec<_> =
                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
            inverse_memory_index.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = invert_mapping(&inverse_memory_index_a);
            let memory_index_b = invert_mapping(&inverse_memory_index_b);

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };
    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };
            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter()
                .enumerate()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap() as usize;
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index as usize] = i as u32;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(tcx.intern_layout(variant))
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
    size = size.align_to(align.abi);

    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
        Abi::Uninhabited
    } else {
        Abi::Aggregate { sized: true }
    };

    let layout = tcx.intern_layout(LayoutS {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        abi,
        largest_niche: prefix.largest_niche,
        size,
        align,
    });
    debug!("generator layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
/// This is invoked by the `layout_of` query to record the final
/// layout of each type.
fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
    // If we are running with `-Zprint-type-sizes`, maybe record layouts
    // for dumping later.
    if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
        record_layout_for_printing_outlined(cx, layout)
    }
}
fn record_layout_for_printing_outlined<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
) {
    // Ignore layouts that are done with non-empty environments or
    // non-monomorphic layouts, as the user only wants to see the stuff
    // resulting from the final codegen session.
    if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
        return;
    }

    // (delay format until we actually need it)
    let record = |kind, packed, opt_discr_size, variants| {
        let type_desc = format!("{:?}", layout.ty);
        cx.tcx.sess.code_stats.record_type_size(
            kind,
            type_desc,
            layout.align.abi,
            layout.size,
            packed,
            opt_discr_size,
            variants,
        );
    };
    let adt_def = match *layout.ty.kind() {
        ty::Adt(ref adt_def, _) => {
            debug!("print-type-size t: `{:?}` process adt", layout.ty);
            adt_def
        }

        ty::Closure(..) => {
            debug!("print-type-size t: `{:?}` record closure", layout.ty);
            record(DataTypeKind::Closure, false, None, vec![]);
            return;
        }

        _ => {
            debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
            return;
        }
    };

    let adt_kind = adt_def.adt_kind();
    let adt_packed = adt_def.repr().pack.is_some();
    let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
        let mut min_size = Size::ZERO;
        let field_info: Vec<_> = flds
            .iter()
            .enumerate()
            .map(|(i, &name)| {
                let field_layout = layout.field(cx, i);
                let offset = layout.fields.offset(i);
                let field_end = offset + field_layout.size;
                if min_size < field_end {
                    min_size = field_end;
                }
                FieldInfo {
                    name,
                    offset: offset.bytes(),
                    size: field_layout.size.bytes(),
                    align: field_layout.align.abi.bytes(),
                }
            })
            .collect();

        VariantInfo {
            name: n,
            kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
            align: layout.align.abi.bytes(),
            size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
            fields: field_info,
        }
    };
    match layout.variants {
        Variants::Single { index } => {
            if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
                let variant_def = &adt_def.variant(index);
                let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    None,
                    vec![build_variant_info(Some(variant_def.name), &fields, layout)],
                );
            } else {
                // (This case arises for *empty* enums; so give it
                // zero variants.)
                record(adt_kind.into(), adt_packed, None, vec![]);
            }
        }
        Variants::Multiple { tag, ref tag_encoding, .. } => {
            debug!(
                "print-type-size `{:#?}` adt general variants def {}",
                layout.ty,
                adt_def.variants().len()
            );
            let variant_infos: Vec<_> = adt_def
                .variants()
                .iter_enumerated()
                .map(|(i, variant_def)| {
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                    build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
                })
                .collect();
            record(
                adt_kind.into(),
                adt_packed,
                match tag_encoding {
                    TagEncoding::Direct => Some(tag.size(cx)),
                    _ => None,
                },
                variant_infos,
            );
        }
    }
}