use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::ty::layout::{
    IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
    self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;

use std::cmp::{self, Ordering};
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

use crate::layout_sanity_check::sanity_check_layout;
pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;
    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    record_layout_for_printing(&cx, layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
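
// Illustrative usage (not part of the original source): downstream code reaches
// this provider through the `LayoutOf` trait, e.g. `cx.layout_of(ty)?` as used
// throughout this module, which keys the query on `param_env.and(ty)` and returns
// the interned `TyAndLayout` computed above.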
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
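
// Worked example (illustrative values, not from the original source): for
// `map = [2, 0, 1]`, `map[0] == 2` forces `inverse[2] == 0`, so
// `invert_mapping(&[2, 0, 1]) == [1, 2, 0]`; applying the function twice
// round-trips back to the original permutation.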
fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
    let dl = cx.data_layout();
    let b_align = b.align(dl);
    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
    let b_offset = a.size(dl).align_to(b_align.abi);
    let size = (b_offset + b.size(dl)).align_to(align.abi);

    // HACK(nox): We iter on `b` and then `a` because `max_by_key`
    // returns the last maximum.
    let largest_niche = Niche::from_scalar(dl, b_offset, b)
        .into_iter()
        .chain(Niche::from_scalar(dl, Size::ZERO, a))
        .max_by_key(|niche| niche.available(dl));

    LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: vec![Size::ZERO, b_offset],
            memory_index: vec![0, 1],
        },
        abi: Abi::ScalarPair(a, b),
        largest_niche,
        align,
        size,
    }
}
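
// Worked example (illustrative, assuming a 4-byte-aligned `u32`): for `a = u32`
// and `b = u16`, `b_offset = 4.align_to(2) = 4` and `size = (4 + 2).align_to(4) = 8`,
// so the pair occupies 8 bytes with 2 bytes of trailing padding after `b`.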
fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }
    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index[..end];
        let effective_field_align = |f: &TyAndLayout<'_>| {
            if let Some(pack) = pack {
                // Return the packed alignment in bytes.
                f.align.abi.min(pack).bytes()
            } else {
                // Returns log2(effective-align).
                // This is ok since `pack` applies to all fields equally.
                // The calculation assumes that size is an integer multiple of align, except for ZSTs.
                //
                // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
                f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
            }
        };
        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee.
        if repr.can_randomize_type_layout() {
            // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use to
            // randomize field ordering with.
            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

            // Shuffle the ordering of the fields.
            optimizing.shuffle(&mut rng);

        // Otherwise we just leave things alone and actually optimize the type's fields.
        } else {
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        // Then place largest alignments first, largest niches within an alignment group last.
                        let f = &fields[x as usize];
                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
                        (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    // And put the largest niche in an alignment group at the end
                    // so it can be used as discriminant in jagged enums.
                    optimizing.sort_by_key(|&x| {
                        let f = &fields[x as usize];
                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
                        (effective_field_align(f), niche_size)
                    });
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            //                 regardless of the status of `-Z randomize-layout`.
        }
    }
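
    // Illustrative effect of the ordering above (hypothetical type, not from the
    // original source): for `struct S { a: u8, b: u32, c: u16 }` with no `repr`
    // attributes, sorting by decreasing effective alignment yields memory order
    // `b, c, a`, so `S` fits in 8 bytes instead of the 12 a source-order layout
    // would need with padding.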
    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_mapping`).
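
    // For instance (illustrative): if field 2 is first in memory, then field 0,
    // then field 1, `inverse_memory_index == [2, 0, 1]`; inverting it yields
    // `memory_index == [1, 2, 0]`, i.e. field 0 sits second in memory.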
    let mut sized = true;
    let mut offsets = vec![Size::ZERO; fields.len()];
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;

    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }
    for &i in &inverse_memory_index {
        let field = fields[i as usize];
        if !sized {
            cx.tcx.sess.delay_span_bug(
                DUMMY_SP,
                &format!(
                    "univariant: field #{} of `{}` comes after unsized field",
                    offsets.len(),
                    ty
                ),
            );
        }

        if field.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align.min(AbiAndPrefAlign::new(pack))
        } else {
            field.align
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i as usize] = offset;

        if let Some(mut niche) = field.largest_niche {
            let available = niche.available(dl);
            if available > largest_niche_available {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
    }
    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }

    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;
    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
    // Field 5 would be the first element, so memory_index is i:
    // Note: if we didn't optimize, it's already right.
    let memory_index =
        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

    let size = min_size.align_to(align.abi);
    let mut abi = Abi::Aggregate { sized };
    // Unpack newtype ABIs and find scalar pairs.
    if sized && size.bytes() > 0 {
        // All other fields must be ZSTs.
        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
                    match field.abi {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi;
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi;
                        }
                        _ => {}
                    }
                }
            }
            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi, b.abi) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = scalar_pair(cx, a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }
    if fields.iter().any(|f| f.abi.is_uninhabited()) {
        abi = Abi::Uninhabited;
    }

    Ok(LayoutS {
        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
    })
}
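
// Illustrative result (hypothetical input, not from the original source): for two
// fields `[u8, u32]` with default `ReprOptions` and `StructKind::AlwaysSized`, the
// optimizer places the `u32` first in memory (`offsets == [4, 0]`,
// `memory_index == [1, 0]`), and the ABI is promoted to `Abi::ScalarPair` because
// the computed layout matches `scalar_pair` for the two scalars.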
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());
    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            let mut ptr = scalar_unit(Pointer);
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),
        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            if !ty.is_unsafe_ptr() {
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
            let metadata = match unsized_part.kind() {
                ty::Foreign(..) => {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }
                ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                ty::Dynamic(..) => {
                    let mut vtable = scalar_unit(Pointer);
                    vtable.valid_range_mut().start = 1;
                    vtable
                }
                _ => return Err(LayoutError::Unknown(unsized_part)),
            };

            // Effectively a (ptr, meta) tuple.
            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
        }
        ty::Dynamic(_, _, ty::DynStar) => {
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(scalar_pair(cx, data, vtable))
        }
        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
        ty::Closure(_, ref substs) => {
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }
        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }
            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }
            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            //   the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };
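
            // Illustrative example (hypothetical types, not from the original source):
            // `#[repr(simd)] struct F32x4([f32; 4]);` takes the array branch and yields
            // `(e_ty, e_len, is_array) == (f32, 4, true)`, while
            // `#[repr(simd)] struct F32x4(f32, f32, f32, f32);` yields `(f32, 4, false)`.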
            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }
            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };
            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                let mut align =
                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                if let Some(repr_align) = def.repr().align {
                    align = align.max(AbiAndPrefAlign::new(repr_align));
                }
                let optimize = !def.repr().inhibit_union_abi_opt();
                let mut size = Size::ZERO;
                let mut abi = Abi::Aggregate { sized: true };
                let index = VariantIdx::new(0);
                for field in &variants[index] {
                    assert!(field.is_sized());
                    align = align.max(field.align);

                    // If all non-ZST fields have the same ABI, forward this ABI.
                    if optimize && !field.is_zst() {
                        // Discard valid range information and allow undef.
                        let field_abi = match field.abi {
                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                            Abi::Vector { element: x, count } => {
                                Abi::Vector { element: x.to_union(), count }
                            }
                            Abi::Uninhabited | Abi::Aggregate { .. } => {
                                Abi::Aggregate { sized: true }
                            }
                        };

                        if size == Size::ZERO {
                            // first non-ZST: initialize 'abi'
                            abi = field_abi;
                        } else if abi != field_abi {
                            // different fields have different ABI: reset to Aggregate
                            abi = Abi::Aggregate { sized: true };
                        }
                    }

                    size = cmp::max(size, field.size);
                }
                if let Some(pack) = def.repr().pack {
                    align = align.min(AbiAndPrefAlign::new(pack));
                }

                return Ok(tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index },
                    fields: FieldsShape::Union(
                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
                    ),
                    abi,
                    largest_niche: None,
                    align,
                    size: size.align_to(align.abi),
                }));
            }
            // A variant is absent if it's uninhabited and only has ZST fields.
            // Present uninhabited variants only require space for their fields,
            // but *not* an encoding of the discriminant (e.g., a tag value).
            // See issue #49298 for more details on the need to leave space
            // for non-ZST uninhabited data (mostly partial initialization).
            let absent = |fields: &[TyAndLayout<'_>]| {
                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                let is_zst = fields.iter().all(|f| f.is_zst());
                uninhabited && is_zst
            };
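
            // Illustrative examples (hypothetical enums): a variant `A(!)` is
            // uninhabited and all-ZST, hence absent; `B(u32, !)` is likewise
            // uninhabited but its `u32` is not a ZST, so the variant counts as
            // present and still gets space for its fields (see issue #49298).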
            let (present_first, present_second) = {
                let mut present_variants = variants
                    .iter_enumerated()
                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                (present_variants.next(), present_variants.next())
            };
            let present_first = match present_first {
                Some(present_first) => present_first,
                // Uninhabited because it has no variants, or only absent ones.
                None if def.is_enum() => {
                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                }
                // If it's a struct, still compute a layout so that we can still compute the
                // field offsets.
                None => VariantIdx::new(0),
            };
            let is_struct = !def.is_enum() ||
                // Only one variant is present.
                (present_second.is_none() &&
                // Representation optimizations are allowed.
                !def.repr().inhibit_enum_layout_opt());
            if is_struct {
                // Struct, or univariant enum equivalent to a struct.
                // (Typechecking will reject discriminant-sizing attrs.)

                let v = present_first;
                let kind = if def.is_enum() || variants[v].is_empty() {
                    StructKind::AlwaysSized
                } else {
                    let param_env = tcx.param_env(def.did());
                    let last_field = def.variant(v).fields.last().unwrap();
                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
                };
                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
                st.variants = Variants::Single { index: v };

                if def.is_unsafe_cell() {
                    let hide_niches = |scalar: &mut _| match scalar {
                        Scalar::Initialized { value, valid_range } => {
                            *valid_range = WrappingRange::full(value.size(dl))
                        }
                        // Already doesn't have any niches.
                        Scalar::Union { .. } => {}
                    };
                    match &mut st.abi {
                        Abi::Uninhabited => {}
                        Abi::Scalar(scalar) => hide_niches(scalar),
                        Abi::ScalarPair(a, b) => {
                            hide_niches(a);
                            hide_niches(b);
                        }
                        Abi::Vector { element, count: _ } => hide_niches(element),
                        Abi::Aggregate { sized: _ } => {}
                    }
                    st.largest_niche = None;
                    return Ok(tcx.intern_layout(st));
                }
                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
                match st.abi {
                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                        // The asserts ensure that we are not using the
                        // `#[rustc_layout_scalar_valid_range(n)]`
                        // attribute to widen the range of anything as that would probably
                        // result in UB somewhere.
                        // FIXME(eddyb) the asserts are probably not needed,
                        // as larger validity ranges would result in missed
                        // optimizations, *not* wrongly assuming the inner
                        // value is valid. e.g. unions enlarge validity ranges,
                        // because the values may be uninitialized.
                        if let Bound::Included(start) = start {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.start <= start);
                            valid_range.start = start;
                        }
                        if let Bound::Included(end) = end {
                            // FIXME(eddyb) this might be incorrect - it doesn't
                            // account for wrap-around (end < start) ranges.
                            let valid_range = scalar.valid_range_mut();
                            assert!(valid_range.end >= end);
                            valid_range.end = end;
                        }

                        // Update `largest_niche` if we have introduced a larger niche.
                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                        if let Some(niche) = niche {
                            match st.largest_niche {
                                Some(largest_niche) => {
                                    // Replace the existing niche even if they're equal,
                                    // because this one is at a lower offset.
                                    if largest_niche.available(dl) <= niche.available(dl) {
                                        st.largest_niche = Some(niche);
                                    }
                                }
                                None => st.largest_niche = Some(niche),
                            }
                        }
                    }
                    _ => assert!(
                        start == Bound::Unbounded && end == Bound::Unbounded,
                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                        def,
                        st,
                    ),
                }

                return Ok(tcx.intern_layout(st));
            }
            // At this point, we have handled all unions and
            // structs. (We have also handled univariant enums
            // that allow representation optimization.)
            assert!(def.is_enum());

            // Until we've decided whether to use the tagged or
            // niche filling LayoutS, we don't want to intern the
            // variant layouts, so we can't store them in the
            // overall LayoutS. Store the overall LayoutS
            // and the variant LayoutSs here until then.
            struct TmpLayout<'tcx> {
                layout: LayoutS<'tcx>,
                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
            }
            let calculate_niche_filling_layout =
                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                    // The current code for niche-filling relies on variant indices
                    // instead of actual discriminants, so enums with
                    // explicit discriminants (RFC #2363) would misbehave.
                    if def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                    {
                        return Ok(None);
                    }

                    if variants.len() < 2 {
                        return Ok(None);
                    }

                    let mut align = dl.aggregate_align;
                    let mut variant_layouts = variants
                        .iter_enumerated()
                        .map(|(j, v)| {
                            let mut st = univariant_uninterned(
                                cx,
                                ty,
                                v,
                                &def.repr(),
                                StructKind::AlwaysSized,
                            )?;
                            st.variants = Variants::Single { index: j };

                            align = align.max(st.align);

                            Ok(st)
                        })
                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                    let largest_variant_index = match variant_layouts
                        .iter_enumerated()
                        .max_by_key(|(_i, layout)| layout.size.bytes())
                        .map(|(i, _layout)| i)
                    {
                        None => return Ok(None),
                        Some(i) => i,
                    };

                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                    let needs_disc = |index: VariantIdx| {
                        index != largest_variant_index && !absent(&variants[index])
                    };
                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                    let count = niche_variants.size_hint().1.unwrap() as u128;
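
                    // Illustrative example (hypothetical enum): with three variants
                    // whose largest is index 1, `needs_disc` holds for indices 0 and 2,
                    // so `niche_variants == 0..=2` and `count == 3`; a `bool` field
                    // (valid range 0..=1 in one byte) has 254 spare values, so
                    // `niche.reserve(cx, 3)` succeeds.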
                    // Find the field with the largest niche.
                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
                        [largest_variant_index]
                        .iter()
                        .enumerate()
                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                        .max_by_key(|(_, niche)| niche.available(dl))
                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
                    {
                        None => return Ok(None),
                        Some(x) => x,
                    };

                    let niche_offset = niche.offset
                        + variant_layouts[largest_variant_index].fields.offset(field_index);
                    let niche_size = niche.value.size(dl);
                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
                    let all_variants_fit =
                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                            if i == largest_variant_index {
                                return true;
                            }

                            layout.largest_niche = None;

                            if layout.size <= niche_offset {
                                // This variant will fit before the niche.
                                return true;
                            }

                            // Determine if it'll fit after the niche.
                            let this_align = layout.align.abi;
                            let this_offset = (niche_offset + niche_size).align_to(this_align);

                            if this_offset + layout.size > size {
                                return false;
                            }

                            // It'll fit, but we need to make some adjustments.
                            match layout.fields {
                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                    for (j, offset) in offsets.iter_mut().enumerate() {
                                        if !variants[i][j].is_zst() {
                                            *offset += this_offset;
                                        }
                                    }
                                }
                                _ => {
                                    panic!("Layout of fields should be Arbitrary for variants")
                                }
                            }

                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
                            if !layout.abi.is_uninhabited() {
                                layout.abi = Abi::Aggregate { sized: true };
                            }
                            layout.size += this_offset;

                            true
                        });

                    if !all_variants_fit {
                        return Ok(None);
                    }
                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

                    let others_zst = variant_layouts
                        .iter_enumerated()
                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
                    let same_size = size == variant_layouts[largest_variant_index].size;
                    let same_align = align == variant_layouts[largest_variant_index].align;

                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                        Abi::Uninhabited
                    } else if same_size && same_align && others_zst {
                        match variant_layouts[largest_variant_index].abi {
                            // When the total alignment and size match, we can use the
                            // same ABI as the scalar variant with the reserved niche.
                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                            Abi::ScalarPair(first, second) => {
                                // Only the niche is guaranteed to be initialised,
                                // so use union layouts for the other primitive.
                                if niche_offset == Size::ZERO {
                                    Abi::ScalarPair(niche_scalar, second.to_union())
                                } else {
                                    Abi::ScalarPair(first.to_union(), niche_scalar)
                                }
                            }
                            _ => Abi::Aggregate { sized: true },
                        }
                    } else {
                        Abi::Aggregate { sized: true }
                    };
                    let layout = LayoutS {
                        variants: Variants::Multiple {
                            tag: niche_scalar,
                            tag_encoding: TagEncoding::Niche {
                                untagged_variant: largest_variant_index,
                                niche_variants,
                                niche_start,
                            },
                            tag_field: 0,
                            variants: IndexVec::new(),
                        },
                        fields: FieldsShape::Arbitrary {
                            offsets: vec![niche_offset],
                            memory_index: vec![0],
                        },
                        abi,
                        largest_niche,
                        size,
                        align,
                    };

                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
                };
            let niche_filling_layout = calculate_niche_filling_layout()?;

            let (mut min, mut max) = (i128::MAX, i128::MIN);
            let discr_type = def.repr().discr_type();
            let bits = Integer::from_attr(cx, discr_type).size().bits();
            for (i, discr) in def.discriminants(tcx) {
                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                    continue;
                }
                let mut x = discr.val as i128;
                if discr_type.is_signed() {
                    // sign extend the raw representation to be an i128
                    x = (x << (128 - bits)) >> (128 - bits);
                }
                if x < min {
                    min = x;
                }
                if x > max {
                    max = x;
                }
            }
            // We might have no inhabited variants, so pretend there's at least one.
            if (min, max) == (i128::MAX, i128::MIN) {
                min = 0;
                max = 0;
            }
            assert!(min <= max, "discriminant range is {}...{}", min, max);
            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
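
            // Illustrative arithmetic (hypothetical enum): with
            // `#[repr(i8)] enum E { A = -1, B, C }`, the raw representation of `A`
            // is `0xFF` and `bits == 8`, so `(x << 120) >> 120` sign-extends it to
            // `-1i128`; the loop then finds `(min, max) == (-1, 1)` and
            // `Integer::repr_discr` can pick a signed `I8` tag.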
            let mut align = dl.aggregate_align;
            let mut size = Size::ZERO;

            // We're interested in the smallest alignment, so start large.
            let mut start_align = Align::from_bytes(256).unwrap();
            assert_eq!(Integer::for_align(dl, start_align), None);

            // repr(C) on an enum tells us to make a (tag, union) layout,
            // so we need to grow the prefix alignment to be at least
            // the alignment of the union. (This value is used both for
            // determining the alignment of the overall enum, and for
            // determining the alignment of the payload after the tag.)
            let mut prefix_align = min_ity.align(dl).abi;
            if def.repr().c() {
                for fields in &variants {
                    for field in fields {
                        prefix_align = prefix_align.max(field.align.abi);
                    }
                }
            }
            // Create the set of structs that represent each variant.
            let mut layout_variants = variants
                .iter_enumerated()
                .map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(
                        cx,
                        ty,
                        &field_layouts,
                        &def.repr(),
                        StructKind::Prefixed(min_ity.size(), prefix_align),
                    )?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            // Align the maximum variant size to the largest alignment.
            size = size.align_to(align.abi);

            if size.bytes() >= dl.obj_size_bound() {
                return Err(LayoutError::SizeOverflow(ty));
            }
            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
            if typeck_ity < min_ity {
                // It is a bug if Layout decided on a greater discriminant size than typeck for
                // some reason at this point (based on values discriminant can take on). Mostly
                // because this discriminant will be loaded, and then stored into variable of
                // type calculated by typeck. Consider such case (a bug): typeck decided on
                // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                // discriminant values. That would be a bug, because then, in codegen, in order
                // to store this 16-bit discriminant into 8-bit sized temporary some of the
                // space necessary to represent would have to be discarded (or layout is wrong
                // on thinking it needs 16 bits).
                bug!(
                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                    min_ity,
                    typeck_ity
                );
                // However, it is fine to make discr type however large (as an optimisation)
                // after this point - we'll just truncate the value we load in codegen.
            }
            // Check to see if we should use a different type for the
            // discriminant. We can safely use a type with the same size
            // as the alignment of the first field of each variant.
            // We increase the size of the discriminant to avoid LLVM copying
            // padding when it doesn't need to. This normally causes unaligned
            // load/stores and excessive memcpy/memset operations. By using a
            // bigger integer size, LLVM can be sure about its contents and
            // won't be so conservative.
            // Use the initial field alignment.
            let mut ity = if def.repr().c() || def.repr().int.is_some() {
                min_ity
            } else {
                Integer::for_align(dl, start_align).unwrap_or(min_ity)
            };

            // If the alignment is not larger than the chosen discriminant size,
            // don't use the alignment as the final size.
            if ity <= min_ity {
                ity = min_ity;
            } else {
                // Patch up the variants' first few fields.
                let old_ity_size = min_ity.size();
                let new_ity_size = ity.size();
                for variant in &mut layout_variants {
                    match variant.fields {
                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
                            for i in offsets {
                                if *i <= old_ity_size {
                                    assert_eq!(*i, old_ity_size);
                                    *i = new_ity_size;
                                }
                            }
                            // We might be making the struct larger.
                            if variant.size <= old_ity_size {
                                variant.size = new_ity_size;
                            }
                        }
                        _ => bug!(),
                    }
                }
            }
            let tag_mask = ity.size().unsigned_int_max();
            let tag = Scalar::Initialized {
                value: Int(ity, signed),
                valid_range: WrappingRange {
                    start: (min as u128 & tag_mask),
                    end: (max as u128 & tag_mask),
                },
            };
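
            // Illustrative values (hypothetical): with `ity == I8`, `min == -1`, and
            // `max == 1`, `tag_mask == 0xff`, so the valid range wraps around:
            // `start == 0xff` and `end == 1`, i.e. the tag may be 0xff (-1), 0, or 1.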
            let mut abi = Abi::Aggregate { sized: true };

            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            } else if tag.size(dl) == size {
                // Make sure we only use scalar layout when the enum is entirely its
                // own tag (i.e. it has no padding nor any non-ZST variant fields).
                abi = Abi::Scalar(tag);
            } else {
                // Try to use a ScalarPair for all tagged enums.
                let mut common_prim = None;
                let mut common_prim_initialized_in_all_variants = true;
                for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
                    let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                        bug!();
                    };
                    let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                    let (field, offset) = match (fields.next(), fields.next()) {
                        (None, None) => {
                            common_prim_initialized_in_all_variants = false;
                            continue;
                        }
                        (Some(pair), None) => pair,
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    let prim = match field.abi {
                        Abi::Scalar(scalar) => {
                            common_prim_initialized_in_all_variants &=
                                matches!(scalar, Scalar::Initialized { .. });
                            scalar.primitive()
                        }
                        _ => {
                            common_prim = None;
                            break;
                        }
                    };
                    if let Some(pair) = common_prim {
                        // This is pretty conservative. We could go fancier
                        // by conflating things like i32 and u32, or even
                        // realising that (u8, u8) could just cohabit with
                        // u16 or even u32.
                        if pair != (prim, offset) {
                            common_prim = None;
                            break;
                        }
                    } else {
                        common_prim = Some((prim, offset));
                    }
                }
                if let Some((prim, offset)) = common_prim {
                    let prim_scalar = if common_prim_initialized_in_all_variants {
                        scalar_unit(prim)
                    } else {
                        // Common prim might be uninit.
                        Scalar::Union { value: prim }
                    };
                    let pair = scalar_pair(cx, tag, prim_scalar);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if pair_offsets[0] == Size::ZERO
                        && pair_offsets[1] == *offset
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }
            }
            // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
            // variants to ensure they are consistent. This is because a downcast is
            // semantically a NOP, and thus should not affect layout.
            if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
                for variant in &mut layout_variants {
                    // We only do this for variants with fields; the others are not accessed anyway.
                    // Also do not overwrite any already existing "clever" ABIs.
                    if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                        variant.abi = abi;
                        // Also need to bump up the size and alignment, so that the entire value fits in here.
                        variant.size = cmp::max(variant.size, size);
                        variant.align.abi = cmp::max(variant.align.abi, align.abi);
                    }
                }
            }
            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

            let tagged_layout = LayoutS {
                variants: Variants::Multiple {
                    tag,
                    tag_encoding: TagEncoding::Direct,
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
                largest_niche,
                abi,
                align,
                size,
            };

            let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
            let mut best_layout = match (tagged_layout, niche_filling_layout) {
                (tl, Some(nl)) => {
                    // Pick the smaller layout; otherwise,
                    // pick the layout with the larger niche; otherwise,
                    // pick tagged as it has simpler codegen.
                    use Ordering::*;
                    let niche_size = |tmp_l: &TmpLayout<'_>| {
                        tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                    };
                    match (
                        tl.layout.size.cmp(&nl.layout.size),
                        niche_size(&tl).cmp(&niche_size(&nl)),
                    ) {
                        (Greater, _) => nl,
                        (Equal, Less) => nl,
                        _ => tl,
                    }
                }
                (tl, None) => tl,
            };
            // Now we can intern the variant layouts and store them in the enum layout.
            best_layout.layout.variants = match best_layout.layout.variants {
                Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
                    tag,
                    tag_encoding,
                    tag_field,
                    variants: best_layout
                        .variants
                        .into_iter()
                        .map(|layout| tcx.intern_layout(layout))
                        .collect(),
                },
                _ => bug!(),
            };

            tcx.intern_layout(best_layout.layout)
        }
        // Types with no meaningful known layout.
        ty::Projection(_) | ty::Opaque(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes.
    Ineligible(Option<u32>),
}
// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
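//
// Illustrative sketch (hypothetical generator, not from the original source): if a
// local `a` is live across two suspension points while `b` and `c` are each live
// across only one, `a` is promoted to the prefix, whereas `b` and `c` stay
// overlap-eligible and may share the same offset in their respective variants.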
/// Compute the eligibility and assignment of each local.
fn generator_saved_local_eligibility<'tcx>(
    info: &GeneratorLayout<'tcx>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }
    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }
    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
/// Compute the full generator layout.
fn generator_layout<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    substs: SubstsRef<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx;
    let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);

    let Some(info) = tcx.generator_layout(def_id) else {
        return Err(LayoutError::Unknown(ty));
    };
    let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = substs.as_generator().prefix_tys().count();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let discr_int_ty = discr_int.to_ty(tcx, false);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
    let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
    let promoted_layouts = ineligible_locals
        .iter()
        .map(|local| subst_field(info.field_tys[local]))
        .map(|ty| tcx.mk_maybe_uninit(ty))
        .map(|ty| cx.layout_of(ty));
    let prefix_layouts = substs
        .as_generator()
        .prefix_tys()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .collect::<Result<Vec<_>, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);
    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // GeneratorLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = invert_mapping(&memory_index);

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = (tag_index + 1) as u32;
            let offsets_b = offsets.split_off(b_start as usize);
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: Vec<_> =
                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
            inverse_memory_index.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted.
            let memory_index_a = invert_mapping(&inverse_memory_index_a);
            let memory_index_b = invert_mapping(&inverse_memory_index_b);

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };
    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };
            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter()
                .enumerate()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap() as usize;
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index as usize] = i as u32;
                    offset
                })
                .collect();
            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(tcx.intern_layout(variant))
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
    size = size.align_to(align.abi);

    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
        Abi::Uninhabited
    } else {
        Abi::Aggregate { sized: true }
    };

    let layout = tcx.intern_layout(LayoutS {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        abi,
        largest_niche: prefix.largest_niche,
        size,
        align,
    });
    debug!("generator layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
/// This is invoked by the `layout_of` query to record the final
/// layout of each type.
#[inline(never)]
fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
    // If we are running with `-Zprint-type-sizes`, maybe record layouts
    // for dumping later.
    if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
        record_layout_for_printing_outlined(cx, layout)
    }
}
fn record_layout_for_printing_outlined<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
) {
    // Ignore layouts that are done with non-empty environments or
    // non-monomorphic layouts, as the user only wants to see the stuff
    // resulting from the final codegen session.
    if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
        return;
    }

    // (delay format until we actually need it)
    let record = |kind, packed, opt_discr_size, variants| {
        let type_desc = format!("{:?}", layout.ty);
        cx.tcx.sess.code_stats.record_type_size(
            kind,
            type_desc,
            layout.align.abi,
            layout.size,
            packed,
            opt_discr_size,
            variants,
        );
    };
    let adt_def = match *layout.ty.kind() {
        ty::Adt(ref adt_def, _) => {
            debug!("print-type-size t: `{:?}` process adt", layout.ty);
            adt_def
        }

        ty::Closure(..) => {
            debug!("print-type-size t: `{:?}` record closure", layout.ty);
            record(DataTypeKind::Closure, false, None, vec![]);
            return;
        }

        _ => {
            debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
            return;
        }
    };

    let adt_kind = adt_def.adt_kind();
    let adt_packed = adt_def.repr().pack.is_some();
    let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
        let mut min_size = Size::ZERO;
        let field_info: Vec<_> = flds
            .iter()
            .enumerate()
            .map(|(i, &name)| {
                let field_layout = layout.field(cx, i);
                let offset = layout.fields.offset(i);
                let field_end = offset + field_layout.size;
                if min_size < field_end {
                    min_size = field_end;
                }
                FieldInfo {
                    name,
                    offset: offset.bytes(),
                    size: field_layout.size.bytes(),
                    align: field_layout.align.abi.bytes(),
                }
            })
            .collect();

        VariantInfo {
            name: n,
            kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
            align: layout.align.abi.bytes(),
            size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
            fields: field_info,
        }
    };
    match layout.variants {
        Variants::Single { index } => {
            if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
                let variant_def = &adt_def.variant(index);
                let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    None,
                    vec![build_variant_info(Some(variant_def.name), &fields, layout)],
                );
            } else {
                // (This case arises for *empty* enums; so give it
                // zero variants.)
                record(adt_kind.into(), adt_packed, None, vec![]);
            }
        }
        Variants::Multiple { tag, ref tag_encoding, .. } => {
            debug!(
                "print-type-size `{:#?}` adt general variants def {}",
                layout.ty,
                adt_def.variants().len()
            );
            let variant_infos: Vec<_> = adt_def
                .variants()
                .iter_enumerated()
                .map(|(i, variant_def)| {
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
                    build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
                })
                .collect();
            record(
                adt_kind.into(),
                adt_packed,
                match tag_encoding {
                    TagEncoding::Direct => Some(tag.size(cx)),