Rollup merge of #105002 - zertosh:acp-140, r=dtolnay
compiler/rustc_ty_utils/src/layout.rs
index 07af3dc5164786208a9e05784f5cdc169a4c8f47..fbc055b5d238f8690555df090b99c3e267f4b126 100644
 use rustc_span::DUMMY_SP;
 use rustc_target::abi::*;
 
-use std::cmp::{self, Ordering};
+use std::fmt::Debug;
 use std::iter;
-use std::num::NonZeroUsize;
-use std::ops::Bound;
-
-use rand::{seq::SliceRandom, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
 
 use crate::layout_sanity_check::sanity_check_layout;
 
@@ -66,16 +61,6 @@ fn layout_of<'tcx>(
     Ok(layout)
 }
 
-#[derive(Copy, Clone, Debug)]
-enum StructKind {
-    /// A tuple, closure, or univariant which cannot be coerced to unsized.
-    AlwaysSized,
-    /// A univariant, the last field of which may be coerced to unsized.
-    MaybeUnsized,
-    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
-    Prefixed(Size, Align),
-}
-
 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
 // This is used to go between `memory_index` (source field order to memory order)
 // and `inverse_memory_index` (memory order to source field order).
@@ -89,40 +74,13 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
     inverse
 }
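
The hunk only shows the tail of `invert_mapping`, so here is a runnable sketch of the inversion the comment above describes (the body is a plausible reconstruction, not quoted from the file):

```rust
// Invert a bijective mapping: if map[x] = y, then invert(map)[y] = x.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

fn main() {
    // memory_index [2, 0, 1]: field 0 is placed third, field 1 first, field 2 second.
    // The inverse lists source fields by increasing memory offset: [1, 2, 0].
    assert_eq!(invert_mapping(&[2, 0, 1]), vec![1, 2, 0]);
}
```
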
 
-fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
-    let dl = cx.data_layout();
-    let b_align = b.align(dl);
-    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
-    let b_offset = a.size(dl).align_to(b_align.abi);
-    let size = (b_offset + b.size(dl)).align_to(align.abi);
-
-    // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
-    // returns the last maximum.
-    let largest_niche = Niche::from_scalar(dl, b_offset, b)
-        .into_iter()
-        .chain(Niche::from_scalar(dl, Size::ZERO, a))
-        .max_by_key(|niche| niche.available(dl));
-
-    LayoutS {
-        variants: Variants::Single { index: VariantIdx::new(0) },
-        fields: FieldsShape::Arbitrary {
-            offsets: vec![Size::ZERO, b_offset],
-            memory_index: vec![0, 1],
-        },
-        abi: Abi::ScalarPair(a, b),
-        largest_niche,
-        align,
-        size,
-    }
-}
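
The offset/size arithmetic being removed here is compact; a minimal sketch with plain integers (a hypothetical helper, all sizes and alignments in bytes, not the real rustc types):

```rust
// Minimal sketch of the scalar-pair arithmetic above.
fn scalar_pair_layout(a_size: u64, a_align: u64, b_size: u64, b_align: u64) -> (u64, u64) {
    let align_to = |off: u64, align: u64| (off + align - 1) / align * align;
    let align = a_align.max(b_align);
    let b_offset = align_to(a_size, b_align); // b starts after a, aligned for b
    let size = align_to(b_offset + b_size, align); // total size rounded to the pair's align
    (b_offset, size)
}

fn main() {
    // A (u8, u32) pair: the u32 lands at offset 4; total size 8, align 4.
    assert_eq!(scalar_pair_layout(1, 1, 4, 4), (4, 8));
}
```
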
-
 fn univariant_uninterned<'tcx>(
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     ty: Ty<'tcx>,
     fields: &[TyAndLayout<'_>],
     repr: &ReprOptions,
     kind: StructKind,
-) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
     let dl = cx.data_layout();
     let pack = repr.pack;
     if pack.is_some() && repr.align.is_some() {
@@ -130,226 +88,7 @@ fn univariant_uninterned<'tcx>(
         return Err(LayoutError::Unknown(ty));
     }
 
-    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
-    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
-    let optimize = !repr.inhibit_struct_field_reordering_opt();
-    if optimize {
-        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-        let optimizing = &mut inverse_memory_index[..end];
-        let effective_field_align = |f: &TyAndLayout<'_>| {
-            if let Some(pack) = pack {
-                // return the packed alignment in bytes
-                f.align.abi.min(pack).bytes()
-            } else {
-                // returns log2(effective-align).
-                // This is ok since `pack` applies to all fields equally.
-                // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-                //
-                // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-                f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
-            }
-        };
-
-        // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
-        // the field ordering to try to catch code that makes assumptions about layouts
-        // we don't guarantee.
-        if repr.can_randomize_type_layout() {
-            // `ReprOptions.field_shuffle_seed` is a deterministic, per-definition seed
-            // that we can use to randomize the field ordering.
-            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
-            // Shuffle the ordering of the fields
-            optimizing.shuffle(&mut rng);
-
-            // Otherwise we just leave things alone and actually optimize the type's fields
-        } else {
-            match kind {
-                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                    optimizing.sort_by_key(|&x| {
-                        // Place ZSTs first to avoid "interesting offsets",
-                        // especially with only one or two non-ZST fields.
-                        // Then place largest alignments first, largest niches within an alignment group last
-                        let f = &fields[x as usize];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
-                        (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
-                    });
-                }
-
-                StructKind::Prefixed(..) => {
-                    // Sort in ascending alignment so that the layout stays optimal
-                    // regardless of the prefix.
-                    // Also put the largest niche in an alignment group at the end,
-                    // so it can be used as the discriminant in jagged enums.
-                    optimizing.sort_by_key(|&x| {
-                        let f = &fields[x as usize];
-                        let niche_size = f.largest_niche.map_or(0, |n| n.available(cx));
-                        (effective_field_align(f), niche_size)
-                    });
-                }
-            }
-
-            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-            //                 regardless of the status of `-Z randomize-layout`
-        }
-    }
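
To make the sort key above concrete, a small runnable illustration with made-up fields; the tuple mirrors `(!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)`:

```rust
use std::cmp::Reverse;

// Hypothetical fields as (name, size, align, is_zst, niche_size) tuples,
// sorted with the same key shape as the AlwaysSized/MaybeUnsized branch above.
fn main() {
    let fields = [
        ("a", 1u64, 1u64, false, 0u128),
        ("b", 4, 4, false, 0),
        ("zst", 0, 1, true, 0),
        ("c", 2, 2, false, 255),
    ];
    let mut order: Vec<usize> = (0..fields.len()).collect();
    order.sort_by_key(|&i| {
        let (_, size, align, is_zst, niche_size) = fields[i];
        // log2 of the effective alignment, with the [u8; 4]-style size promotion.
        let align_key = align.max(size).trailing_zeros() as u64;
        (!is_zst, Reverse(align_key), niche_size)
    });
    let names: Vec<_> = order.iter().map(|&i| fields[i].0).collect();
    // ZST first, then descending alignment; the niche carrier ends its group.
    assert_eq!(names, ["zst", "b", "c", "a"]);
}
```
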
-
-    // inverse_memory_index holds field indices by increasing memory offset.
-    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-    // We now write field offsets to the corresponding offset slot;
-    // field 5 with offset 0 puts 0 in offsets[5].
-    // At the bottom of this function, we invert `inverse_memory_index` to
-    // produce `memory_index` (see `invert_mapping`).
-
-    let mut sized = true;
-    let mut offsets = vec![Size::ZERO; fields.len()];
-    let mut offset = Size::ZERO;
-    let mut largest_niche = None;
-    let mut largest_niche_available = 0;
-
-    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-        let prefix_align =
-            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-        align = align.max(AbiAndPrefAlign::new(prefix_align));
-        offset = prefix_size.align_to(prefix_align);
-    }
-
-    for &i in &inverse_memory_index {
-        let field = fields[i as usize];
-        if !sized {
-            cx.tcx.sess.delay_span_bug(
-                DUMMY_SP,
-                &format!(
-                    "univariant: field #{} of `{}` comes after unsized field",
-                    offsets.len(),
-                    ty
-                ),
-            );
-        }
-
-        if field.is_unsized() {
-            sized = false;
-        }
-
-        // Invariant: offset < dl.obj_size_bound() <= 1<<61
-        let field_align = if let Some(pack) = pack {
-            field.align.min(AbiAndPrefAlign::new(pack))
-        } else {
-            field.align
-        };
-        offset = offset.align_to(field_align.abi);
-        align = align.max(field_align);
-
-        debug!("univariant offset: {:?} field: {:#?}", offset, field);
-        offsets[i as usize] = offset;
-
-        if let Some(mut niche) = field.largest_niche {
-            let available = niche.available(dl);
-            if available > largest_niche_available {
-                largest_niche_available = available;
-                niche.offset += offset;
-                largest_niche = Some(niche);
-            }
-        }
-
-        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
-    }
-
-    if let Some(repr_align) = repr.align {
-        align = align.max(AbiAndPrefAlign::new(repr_align));
-    }
-
-    debug!("univariant min_size: {:?}", offset);
-    let min_size = offset;
-
-    // As stated above, inverse_memory_index holds field indices by increasing offset.
-    // This makes it an already-sorted view of the offsets vec.
-    // To invert it, consider:
-    // if field 5 has offset 0, then inverse_memory_index[0] is 5,
-    // so memory_index[5] should be 0.
-    // Note: if we didn't optimize, it's already right.
-
-    let memory_index =
-        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
-    let size = min_size.align_to(align.abi);
-    let mut abi = Abi::Aggregate { sized };
-
-    // Unpack newtype ABIs and find scalar pairs.
-    if sized && size.bytes() > 0 {
-        // All other fields must be ZSTs.
-        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
-        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-            // We have exactly one non-ZST field.
-            (Some((i, field)), None, None) => {
-                // Field fills the struct and it has a scalar or scalar pair ABI.
-                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
-                    match field.abi {
-                        // For plain scalars, or vectors of them, we can't unpack
-                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                            abi = field.abi;
-                        }
-                        // But scalar pairs are Rust-specific and get
-                        // treated as aggregates by C ABIs anyway.
-                        Abi::ScalarPair(..) => {
-                            abi = field.abi;
-                        }
-                        _ => {}
-                    }
-                }
-            }
-
-            // Two non-ZST fields, and they're both scalars.
-            (Some((i, a)), Some((j, b)), None) => {
-                match (a.abi, b.abi) {
-                    (Abi::Scalar(a), Abi::Scalar(b)) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = scalar_pair(cx, a, b);
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-                    _ => {}
-                }
-            }
-
-            _ => {}
-        }
-    }
-
-    if fields.iter().any(|f| f.abi.is_uninhabited()) {
-        abi = Abi::Uninhabited;
-    }
-
-    Ok(LayoutS {
-        variants: Variants::Single { index: VariantIdx::new(0) },
-        fields: FieldsShape::Arbitrary { offsets, memory_index },
-        abi,
-        largest_niche,
-        align,
-        size,
-    })
+    cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
 }
 
 fn layout_of_uncached<'tcx>(
@@ -400,14 +139,7 @@ fn layout_of_uncached<'tcx>(
         }
 
         // The never type.
-        ty::Never => tcx.intern_layout(LayoutS {
-            variants: Variants::Single { index: VariantIdx::new(0) },
-            fields: FieldsShape::Primitive,
-            abi: Abi::Uninhabited,
-            largest_niche: None,
-            align: dl.i8_align,
-            size: Size::ZERO,
-        }),
+        ty::Never => tcx.intern_layout(cx.layout_of_never_type()),
 
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
@@ -436,7 +168,7 @@ fn layout_of_uncached<'tcx>(
             };
 
             // Effectively a (ptr, meta) tuple.
-            tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
+            tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
         }
 
         ty::Dynamic(_, _, ty::DynStar) => {
@@ -444,7 +176,7 @@ fn layout_of_uncached<'tcx>(
             data.valid_range_mut().start = 0;
             let mut vtable = scalar_unit(Pointer);
             vtable.valid_range_mut().start = 1;
-            tcx.intern_layout(scalar_pair(cx, data, vtable))
+            tcx.intern_layout(cx.scalar_pair(data, vtable))
         }
 
         // Arrays and slices.
@@ -593,8 +325,8 @@ fn layout_of_uncached<'tcx>(
 
                 // Extract the number of elements from the layout of the array field:
                 let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
-                        return Err(LayoutError::Unknown(ty));
-                    };
+                    return Err(LayoutError::Unknown(ty));
+                };
 
                 (*e_ty, *count, true)
             } else {
@@ -619,14 +351,14 @@ fn layout_of_uncached<'tcx>(
             // Compute the ABI of the element type:
             let e_ly = cx.layout_of(e_ty)?;
             let Abi::Scalar(e_abi) = e_ly.abi else {
-                    // This error isn't caught in typeck, e.g., if
-                    // the element type of the vector is generic.
-                    tcx.sess.fatal(&format!(
-                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
-                        (integer/float/pointer) element type `{}`",
-                        ty, e_ty
-                    ))
-                };
+                // This error isn't caught in typeck, e.g., if
+                // the element type of the vector is generic.
+                tcx.sess.fatal(&format!(
+                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
+                    (integer/float/pointer) element type `{}`",
+                    ty, e_ty
+                ))
+            };
 
             // Compute the size and alignment of the vector:
             let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
@@ -673,681 +405,41 @@ fn layout_of_uncached<'tcx>(
                     return Err(LayoutError::Unknown(ty));
                 }
 
-                let mut align =
-                    if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
-                if let Some(repr_align) = def.repr().align {
-                    align = align.max(AbiAndPrefAlign::new(repr_align));
-                }
-
-                let optimize = !def.repr().inhibit_union_abi_opt();
-                let mut size = Size::ZERO;
-                let mut abi = Abi::Aggregate { sized: true };
-                let index = VariantIdx::new(0);
-                for field in &variants[index] {
-                    assert!(field.is_sized());
-                    align = align.max(field.align);
-
-                    // If all non-ZST fields have the same ABI, forward this ABI
-                    if optimize && !field.is_zst() {
-                        // Discard valid range information and allow undef
-                        let field_abi = match field.abi {
-                            Abi::Scalar(x) => Abi::Scalar(x.to_union()),
-                            Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
-                            Abi::Vector { element: x, count } => {
-                                Abi::Vector { element: x.to_union(), count }
-                            }
-                            Abi::Uninhabited | Abi::Aggregate { .. } => {
-                                Abi::Aggregate { sized: true }
-                            }
-                        };
-
-                        if size == Size::ZERO {
-                            // first non ZST: initialize 'abi'
-                            abi = field_abi;
-                        } else if abi != field_abi {
-                            // different fields have different ABI: reset to Aggregate
-                            abi = Abi::Aggregate { sized: true };
-                        }
-                    }
-
-                    size = cmp::max(size, field.size);
-                }
-
-                if let Some(pack) = def.repr().pack {
-                    align = align.min(AbiAndPrefAlign::new(pack));
-                }
-
-                return Ok(tcx.intern_layout(LayoutS {
-                    variants: Variants::Single { index },
-                    fields: FieldsShape::Union(
-                        NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
-                    ),
-                    abi,
-                    largest_niche: None,
-                    align,
-                    size: size.align_to(align.abi),
-                }));
-            }
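
The union rules above (size is the largest field size rounded up to the largest alignment; the ABI is forwarded only when all non-ZST fields agree) are observable from the outside; a quick runnable check:

```rust
use std::mem::{align_of, size_of};

// size = max(field sizes) rounded up to align = max(field aligns).
#[allow(dead_code)]
#[repr(C)]
union Example {
    a: u16,
    b: [u8; 5],
}

fn main() {
    assert_eq!(align_of::<Example>(), 2); // max(2, 1)
    assert_eq!(size_of::<Example>(), 6); // max(2, 5) = 5, rounded up to align 2
}
```
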
-
-            // A variant is absent if it's uninhabited and only has ZST fields.
-            // Present uninhabited variants only require space for their fields,
-            // but *not* an encoding of the discriminant (e.g., a tag value).
-            // See issue #49298 for more details on the need to leave space
-            // for non-ZST uninhabited data (mostly partial initialization).
-            let absent = |fields: &[TyAndLayout<'_>]| {
-                let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
-                let is_zst = fields.iter().all(|f| f.is_zst());
-                uninhabited && is_zst
-            };
-            let (present_first, present_second) = {
-                let mut present_variants = variants
-                    .iter_enumerated()
-                    .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
-                (present_variants.next(), present_variants.next())
-            };
-            let present_first = match present_first {
-                Some(present_first) => present_first,
-                // Uninhabited because it has no variants, or only absent ones.
-                None if def.is_enum() => {
-                    return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
-                }
-                // If it's a struct, still compute a layout so that we can still compute the
-                // field offsets.
-                None => VariantIdx::new(0),
-            };
-
-            let is_struct = !def.is_enum() ||
-                    // Only one variant is present.
-                    (present_second.is_none() &&
-                        // Representation optimizations are allowed.
-                        !def.repr().inhibit_enum_layout_opt());
-            if is_struct {
-                // Struct, or univariant enum equivalent to a struct.
-                // (Typechecking will reject discriminant-sizing attrs.)
-
-                let v = present_first;
-                let kind = if def.is_enum() || variants[v].is_empty() {
-                    StructKind::AlwaysSized
-                } else {
-                    let param_env = tcx.param_env(def.did());
-                    let last_field = def.variant(v).fields.last().unwrap();
-                    let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
-                    if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
-                };
-
-                let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
-                st.variants = Variants::Single { index: v };
-
-                if def.is_unsafe_cell() {
-                    let hide_niches = |scalar: &mut _| match scalar {
-                        Scalar::Initialized { value, valid_range } => {
-                            *valid_range = WrappingRange::full(value.size(dl))
-                        }
-                        // Already doesn't have any niches
-                        Scalar::Union { .. } => {}
-                    };
-                    match &mut st.abi {
-                        Abi::Uninhabited => {}
-                        Abi::Scalar(scalar) => hide_niches(scalar),
-                        Abi::ScalarPair(a, b) => {
-                            hide_niches(a);
-                            hide_niches(b);
-                        }
-                        Abi::Vector { element, count: _ } => hide_niches(element),
-                        Abi::Aggregate { sized: _ } => {}
-                    }
-                    st.largest_niche = None;
-                    return Ok(tcx.intern_layout(st));
-                }
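
The `hide_niches` branch above is visible from safe code on toolchains that include this niche-hiding logic (it landed around Rust 1.66); a quick check:

```rust
use std::cell::UnsafeCell;
use std::mem::size_of;

// `UnsafeCell` hides its content's niches, so `Option` can no longer use the
// null niche of `&u8` and must add a tag.
fn main() {
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    assert!(size_of::<Option<UnsafeCell<&u8>>>() > size_of::<&u8>());
}
```
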
-
-                let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
-                match st.abi {
-                    Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
-                        // The asserts ensure that we are not using the
-                        // `#[rustc_layout_scalar_valid_range(n)]`
-                        // attribute to widen the range of anything, as that would
-                        // probably result in UB somewhere.
-                        // FIXME(eddyb) the asserts are probably not needed,
-                        // as larger validity ranges would result in missed
-                        // optimizations, *not* wrongly assuming the inner
-                        // value is valid. e.g. unions enlarge validity ranges,
-                        // because the values may be uninitialized.
-                        if let Bound::Included(start) = start {
-                            // FIXME(eddyb) this might be incorrect - it doesn't
-                            // account for wrap-around (end < start) ranges.
-                            let valid_range = scalar.valid_range_mut();
-                            assert!(valid_range.start <= start);
-                            valid_range.start = start;
-                        }
-                        if let Bound::Included(end) = end {
-                            // FIXME(eddyb) this might be incorrect - it doesn't
-                            // account for wrap-around (end < start) ranges.
-                            let valid_range = scalar.valid_range_mut();
-                            assert!(valid_range.end >= end);
-                            valid_range.end = end;
-                        }
-
-                        // Update `largest_niche` if we have introduced a larger niche.
-                        let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
-                        if let Some(niche) = niche {
-                            match st.largest_niche {
-                                Some(largest_niche) => {
-                                    // Replace the existing niche even if they're equal,
-                                    // because this one is at a lower offset.
-                                    if largest_niche.available(dl) <= niche.available(dl) {
-                                        st.largest_niche = Some(niche);
-                                    }
-                                }
-                                None => st.largest_niche = Some(niche),
-                            }
-                        }
-                    }
-                    _ => assert!(
-                        start == Bound::Unbounded && end == Bound::Unbounded,
-                        "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
-                        def,
-                        st,
-                    ),
-                }
-
-                return Ok(tcx.intern_layout(st));
-            }
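
The valid-range clamping above is exactly what gives the standard library's `NonZero*` types their niche; observable as:

```rust
use std::mem::size_of;

// NonZeroU8 is a #[rustc_layout_scalar_valid_range_start(1)] wrapper, so the
// clamped range leaves 0 as a niche for Option's None.
use std::num::NonZeroU8;

fn main() {
    assert_eq!(size_of::<NonZeroU8>(), 1);
    assert_eq!(size_of::<Option<NonZeroU8>>(), 1);
}
```
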
-
-            // At this point, we have handled all unions and
-            // structs. (We have also handled univariant enums
-            // that allow representation optimization.)
-            assert!(def.is_enum());
-
-            // Until we've decided whether to use the tagged or
-            // niche filling LayoutS, we don't want to intern the
-            // variant layouts, so we can't store them in the
-            // overall LayoutS. Store the overall LayoutS
-            // and the variant LayoutSs here until then.
-            struct TmpLayout<'tcx> {
-                layout: LayoutS<'tcx>,
-                variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
+                return Ok(tcx.intern_layout(
+                    cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
+                ));
             }
 
-            let calculate_niche_filling_layout =
-                || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
-                    // The current code for niche-filling relies on variant indices
-                    // instead of actual discriminants, so enums with
-                    // explicit discriminants (RFC #2363) would misbehave.
-                    if def.repr().inhibit_enum_layout_opt()
+            tcx.intern_layout(
+                cx.layout_of_struct_or_enum(
+                    &def.repr(),
+                    &variants,
+                    def.is_enum(),
+                    def.is_unsafe_cell(),
+                    tcx.layout_scalar_valid_range(def.did()),
+                    |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
+                    def.is_enum()
+                        .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
+                        .into_iter()
+                        .flatten(),
+                    def.repr().inhibit_enum_layout_opt()
                         || def
                             .variants()
                             .iter_enumerated()
-                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
-                    {
-                        return Ok(None);
-                    }
-
-                    if variants.len() < 2 {
-                        return Ok(None);
-                    }
-
-                    let mut align = dl.aggregate_align;
-                    let mut variant_layouts = variants
-                        .iter_enumerated()
-                        .map(|(j, v)| {
-                            let mut st = univariant_uninterned(
-                                cx,
-                                ty,
-                                v,
-                                &def.repr(),
-                                StructKind::AlwaysSized,
-                            )?;
-                            st.variants = Variants::Single { index: j };
-
-                            align = align.max(st.align);
-
-                            Ok(st)
-                        })
-                        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-                    let largest_variant_index = match variant_layouts
-                        .iter_enumerated()
-                        .max_by_key(|(_i, layout)| layout.size.bytes())
-                        .map(|(i, _layout)| i)
+                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
                     {
-                        None => return Ok(None),
-                        Some(i) => i,
-                    };
-
-                    let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
-                    let needs_disc = |index: VariantIdx| {
-                        index != largest_variant_index && !absent(&variants[index])
-                    };
-                    let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
-                        ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
-
-                    let count = niche_variants.size_hint().1.unwrap() as u128;
-
-                    // Find the field with the largest niche
-                    let (field_index, niche, (niche_start, niche_scalar)) = match variants
-                        [largest_variant_index]
-                        .iter()
-                        .enumerate()
-                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
-                        .max_by_key(|(_, niche)| niche.available(dl))
-                        .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
-                    {
-                        None => return Ok(None),
-                        Some(x) => x,
-                    };
-
-                    let niche_offset = niche.offset
-                        + variant_layouts[largest_variant_index].fields.offset(field_index);
-                    let niche_size = niche.value.size(dl);
-                    let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
-                    let all_variants_fit =
-                        variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
-                            if i == largest_variant_index {
-                                return true;
-                            }
-
-                            layout.largest_niche = None;
-
-                            if layout.size <= niche_offset {
-                                // This variant will fit before the niche.
-                                return true;
-                            }
-
-                            // Determine if it'll fit after the niche.
-                            let this_align = layout.align.abi;
-                            let this_offset = (niche_offset + niche_size).align_to(this_align);
-
-                            if this_offset + layout.size > size {
-                                return false;
-                            }
-
-                            // It'll fit, but we need to make some adjustments.
-                            match layout.fields {
-                                FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                                    for (j, offset) in offsets.iter_mut().enumerate() {
-                                        if !variants[i][j].is_zst() {
-                                            *offset += this_offset;
-                                        }
-                                    }
-                                }
-                                _ => {
-                                    panic!("Layout of fields should be Arbitrary for variants")
-                                }
-                            }
-
-                            // It can't be a Scalar or ScalarPair because the offset isn't 0.
-                            if !layout.abi.is_uninhabited() {
-                                layout.abi = Abi::Aggregate { sized: true };
-                            }
-                            layout.size += this_offset;
-
-                            true
-                        });
-
-                    if !all_variants_fit {
-                        return Ok(None);
-                    }
-
-                    let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
-                    let others_zst = variant_layouts
-                        .iter_enumerated()
-                        .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
-                    let same_size = size == variant_layouts[largest_variant_index].size;
-                    let same_align = align == variant_layouts[largest_variant_index].align;
-
-                    let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-                        Abi::Uninhabited
-                    } else if same_size && same_align && others_zst {
-                        match variant_layouts[largest_variant_index].abi {
-                            // When the total alignment and size match, we can use the
-                            // same ABI as the scalar variant with the reserved niche.
-                            Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                            Abi::ScalarPair(first, second) => {
-                                // Only the niche is guaranteed to be initialised,
-                                // so use union layouts for the other primitive.
-                                if niche_offset == Size::ZERO {
-                                    Abi::ScalarPair(niche_scalar, second.to_union())
-                                } else {
-                                    Abi::ScalarPair(first.to_union(), niche_scalar)
-                                }
-                            }
-                            _ => Abi::Aggregate { sized: true },
-                        }
-                    } else {
-                        Abi::Aggregate { sized: true }
-                    };
-
-                    let layout = LayoutS {
-                        variants: Variants::Multiple {
-                            tag: niche_scalar,
-                            tag_encoding: TagEncoding::Niche {
-                                untagged_variant: largest_variant_index,
-                                niche_variants,
-                                niche_start,
-                            },
-                            tag_field: 0,
-                            variants: IndexVec::new(),
-                        },
-                        fields: FieldsShape::Arbitrary {
-                            offsets: vec![niche_offset],
-                            memory_index: vec![0],
-                        },
-                        abi,
-                        largest_niche,
-                        size,
-                        align,
-                    };
-
-                    Ok(Some(TmpLayout { layout, variants: variant_layouts }))
-                };
-
-            let niche_filling_layout = calculate_niche_filling_layout()?;
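What the removed calculation buys, seen from the outside: dataless variants are encoded in otherwise-invalid values (the niche) of a field of the largest variant, so no separate tag is needed. A quick check (the layout of user enums is not a language guarantee, but this is what the niche-filling pass produces on current rustc):

```rust
use std::mem::size_of;

// The dataless variant B is stored in the null niche of A's reference,
// so E needs no tag byte.
#[allow(dead_code)]
enum E {
    A(&'static u8),
    B,
}

fn main() {
    assert_eq!(size_of::<E>(), size_of::<&u8>());
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
}
```
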
-
-            let (mut min, mut max) = (i128::MAX, i128::MIN);
-            let discr_type = def.repr().discr_type();
-            let bits = Integer::from_attr(cx, discr_type).size().bits();
-            for (i, discr) in def.discriminants(tcx) {
-                if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
-                    continue;
-                }
-                let mut x = discr.val as i128;
-                if discr_type.is_signed() {
-                    // sign extend the raw representation to be an i128
-                    x = (x << (128 - bits)) >> (128 - bits);
-                }
-                if x < min {
-                    min = x;
-                }
-                if x > max {
-                    max = x;
-                }
-            }
-            // We might have no inhabited variants, so pretend there's at least one.
-            if (min, max) == (i128::MAX, i128::MIN) {
-                min = 0;
-                max = 0;
-            }
-            assert!(min <= max, "discriminant range is {}...{}", min, max);
-            let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
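
The sign-extension trick in the loop above, `(x << (128 - bits)) >> (128 - bits)`, in isolation:

```rust
// Sign-extend an N-bit raw discriminant to i128, as in the loop above.
fn sign_extend(raw: u128, bits: u32) -> i128 {
    let x = raw as i128;
    (x << (128 - bits)) >> (128 - bits)
}

fn main() {
    // A repr(i8) discriminant of -1 is stored as the raw byte 0xFF.
    assert_eq!(sign_extend(0xFF, 8), -1);
    assert_eq!(sign_extend(0x7F, 8), 127);
}
```
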
-
-            let mut align = dl.aggregate_align;
-            let mut size = Size::ZERO;
-
-            // We're interested in the smallest alignment, so start large.
-            let mut start_align = Align::from_bytes(256).unwrap();
-            assert_eq!(Integer::for_align(dl, start_align), None);
-
-            // repr(C) on an enum tells us to make a (tag, union) layout,
-            // so we need to grow the prefix alignment to be at least
-            // the alignment of the union. (This value is used both for
-            // determining the alignment of the overall enum, and for
-            // determining the alignment of the payload after the tag.)
-            let mut prefix_align = min_ity.align(dl).abi;
-            if def.repr().c() {
-                for fields in &variants {
-                    for field in fields {
-                        prefix_align = prefix_align.max(field.align.abi);
-                    }
-                }
-            }
-
-            // Create the set of structs that represent each variant.
-            let mut layout_variants = variants
-                .iter_enumerated()
-                .map(|(i, field_layouts)| {
-                    let mut st = univariant_uninterned(
-                        cx,
-                        ty,
-                        &field_layouts,
-                        &def.repr(),
-                        StructKind::Prefixed(min_ity.size(), prefix_align),
-                    )?;
-                    st.variants = Variants::Single { index: i };
-                    // Find the first field we can't move later
-                    // to make room for a larger discriminant.
-                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
-                        if !field.is_zst() || field.align.abi.bytes() != 1 {
-                            start_align = start_align.min(field.align.abi);
-                            break;
-                        }
-                    }
-                    size = cmp::max(size, st.size);
-                    align = align.max(st.align);
-                    Ok(st)
-                })
-                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
-            // Align the maximum variant size to the largest alignment.
-            size = size.align_to(align.abi);
-
-            if size.bytes() >= dl.obj_size_bound() {
-                return Err(LayoutError::SizeOverflow(ty));
-            }
-
-            let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
-            if typeck_ity < min_ity {
-                // It is a bug if Layout decided on a greater discriminant size than typeck
-                // at this point (based on the values the discriminant can take on), mostly
-                // because this discriminant will be loaded and then stored into a variable
-                // of the type computed by typeck. Consider this case (a bug): typeck decided
-                // on a byte-sized discriminant, but Layout thinks we need 16 bits to store
-                // all the discriminant values. Then, in codegen, storing this 16-bit
-                // discriminant into an 8-bit temporary would have to discard some of the
-                // bits needed to represent it (or Layout is wrong in thinking it needs 16 bits).
-                bug!(
-                    "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
-                    min_ity,
-                    typeck_ity
-                );
-                // However, after this point it is fine to make the discr type as large as we
-                // like (as an optimisation): we’ll just truncate the value we load in codegen.
-            }
-
-            // Check to see if we should use a different type for the
-            // discriminant. We can safely use a type with the same size
-            // as the alignment of the first field of each variant.
-            // We increase the size of the discriminant to avoid LLVM copying
-            // padding when it doesn't need to, which normally causes unaligned
-            // loads/stores and excessive memcpy/memset operations. By using a
-            // bigger integer size, LLVM can be sure about its contents and
-            // won't be so conservative.
-
-            // Use the initial field alignment
-            let mut ity = if def.repr().c() || def.repr().int.is_some() {
-                min_ity
-            } else {
-                Integer::for_align(dl, start_align).unwrap_or(min_ity)
-            };
-
-            // If the alignment is not larger than the chosen discriminant size,
-            // don't use the alignment as the final size.
-            if ity <= min_ity {
-                ity = min_ity;
-            } else {
-                // Patch up the variants' first few fields.
-                let old_ity_size = min_ity.size();
-                let new_ity_size = ity.size();
-                for variant in &mut layout_variants {
-                    match variant.fields {
-                        FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                            for i in offsets {
-                                if *i <= old_ity_size {
-                                    assert_eq!(*i, old_ity_size);
-                                    *i = new_ity_size;
+                        let param_env = tcx.param_env(def.did());
+                        def.is_struct()
+                            && match def.variants().iter().next().and_then(|x| x.fields.last()) {
+                                Some(last_field) => {
+                                    tcx.type_of(last_field.did).is_sized(tcx, param_env)
                                 }
+                                None => false,
                             }
-                            // We might be making the struct larger.
-                            if variant.size <= old_ity_size {
-                                variant.size = new_ity_size;
-                            }
-                        }
-                        _ => bug!(),
-                    }
-                }
-            }
-
-            let tag_mask = ity.size().unsigned_int_max();
-            let tag = Scalar::Initialized {
-                value: Int(ity, signed),
-                valid_range: WrappingRange {
-                    start: (min as u128 & tag_mask),
-                    end: (max as u128 & tag_mask),
-                },
-            };
-            let mut abi = Abi::Aggregate { sized: true };
-
-            if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-                abi = Abi::Uninhabited;
-            } else if tag.size(dl) == size {
-                // Make sure we only use scalar layout when the enum is entirely its
-                // own tag (i.e. it has no padding nor any non-ZST variant fields).
-                abi = Abi::Scalar(tag);
-            } else {
-                // Try to use a ScalarPair for all tagged enums.
-                let mut common_prim = None;
-                let mut common_prim_initialized_in_all_variants = true;
-                for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
-                    let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
-                            bug!();
-                        };
-                    let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
-                    let (field, offset) = match (fields.next(), fields.next()) {
-                        (None, None) => {
-                            common_prim_initialized_in_all_variants = false;
-                            continue;
-                        }
-                        (Some(pair), None) => pair,
-                        _ => {
-                            common_prim = None;
-                            break;
-                        }
-                    };
-                    let prim = match field.abi {
-                        Abi::Scalar(scalar) => {
-                            common_prim_initialized_in_all_variants &=
-                                matches!(scalar, Scalar::Initialized { .. });
-                            scalar.primitive()
-                        }
-                        _ => {
-                            common_prim = None;
-                            break;
-                        }
-                    };
-                    if let Some(pair) = common_prim {
-                        // This is pretty conservative. We could go fancier
-                        // by conflating things like i32 and u32, or even
-                        // realising that (u8, u8) could just cohabit with
-                        // u16 or even u32.
-                        if pair != (prim, offset) {
-                            common_prim = None;
-                            break;
-                        }
-                    } else {
-                        common_prim = Some((prim, offset));
-                    }
-                }
-                if let Some((prim, offset)) = common_prim {
-                    let prim_scalar = if common_prim_initialized_in_all_variants {
-                        scalar_unit(prim)
-                    } else {
-                        // Common prim might be uninit.
-                        Scalar::Union { value: prim }
-                    };
-                    let pair = scalar_pair(cx, tag, prim_scalar);
-                    let pair_offsets = match pair.fields {
-                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                            assert_eq!(memory_index, &[0, 1]);
-                            offsets
-                        }
-                        _ => bug!(),
-                    };
-                    if pair_offsets[0] == Size::ZERO
-                        && pair_offsets[1] == *offset
-                        && align == pair.align
-                        && size == pair.size
-                    {
-                        // We can use `ScalarPair` only when it matches our
-                        // already computed layout (including `#[repr(C)]`).
-                        abi = pair.abi;
-                    }
-                }
-            }
-
-            // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
-            // variants to ensure they are consistent. This is because a downcast is
-            // semantically a NOP, and thus should not affect layout.
-            if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
-                for variant in &mut layout_variants {
-                    // We only do this for variants with fields; the others are not accessed anyway.
-                    // Also do not overwrite any already existing "clever" ABIs.
-                    if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                        variant.abi = abi;
-                        // Also need to bump up the size and alignment, so that the entire value fits in here.
-                        variant.size = cmp::max(variant.size, size);
-                        variant.align.abi = cmp::max(variant.align.abi, align.abi);
-                    }
-                }
-            }
-
-            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
-            let tagged_layout = LayoutS {
-                variants: Variants::Multiple {
-                    tag,
-                    tag_encoding: TagEncoding::Direct,
-                    tag_field: 0,
-                    variants: IndexVec::new(),
-                },
-                fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
-                largest_niche,
-                abi,
-                align,
-                size,
-            };
-
-            let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
-            let mut best_layout = match (tagged_layout, niche_filling_layout) {
-                (tl, Some(nl)) => {
-                    // Pick the smaller layout; otherwise,
-                    // pick the layout with the larger niche; otherwise,
-                    // pick tagged as it has simpler codegen.
-                    use Ordering::*;
-                    let niche_size = |tmp_l: &TmpLayout<'_>| {
-                        tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
-                    };
-                    match (
-                        tl.layout.size.cmp(&nl.layout.size),
-                        niche_size(&tl).cmp(&niche_size(&nl)),
-                    ) {
-                        (Greater, _) => nl,
-                        (Equal, Less) => nl,
-                        _ => tl,
-                    }
-                }
-                (tl, None) => tl,
-            };
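
The tie-break above in brief: prefer the smaller layout; on equal size, prefer the larger niche; otherwise keep the tagged layout for simpler codegen. A sketch of the same match over hypothetical (size, niche_size) pairs:

```rust
use std::cmp::Ordering::*;

// Mirrors the (size cmp, niche cmp) match above.
fn pick(tagged: (u64, u128), niche: (u64, u128)) -> &'static str {
    match (tagged.0.cmp(&niche.0), tagged.1.cmp(&niche.1)) {
        (Greater, _) => "niche-filling",
        (Equal, Less) => "niche-filling",
        _ => "tagged",
    }
}

fn main() {
    assert_eq!(pick((16, 0), (8, 1)), "niche-filling"); // smaller wins
    assert_eq!(pick((8, 0), (8, 255)), "niche-filling"); // same size, bigger niche wins
    assert_eq!(pick((8, 255), (8, 255)), "tagged"); // tie: simpler codegen
}
```
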
-
-            // Now we can intern the variant layouts and store them in the enum layout.
-            best_layout.layout.variants = match best_layout.layout.variants {
-                Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
-                    tag,
-                    tag_encoding,
-                    tag_field,
-                    variants: best_layout
-                        .variants
-                        .into_iter()
-                        .map(|layout| tcx.intern_layout(layout))
-                        .collect(),
-                },
-                _ => bug!(),
-            };
-
-            tcx.intern_layout(best_layout.layout)
+                    },
+                )
+                .ok_or(LayoutError::SizeOverflow(ty))?,
+            )
         }
 
         // Types with no meaningful known layout.
@@ -1505,8 +597,8 @@ fn generator_layout<'tcx>(
     let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
 
     let Some(info) = tcx.generator_layout(def_id) else {
-            return Err(LayoutError::Unknown(ty));
-        };
+        return Err(LayoutError::Unknown(ty));
+    };
     let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
 
     // Build a prefix layout, including "promoting" all ineligible
@@ -1609,8 +701,8 @@ fn generator_layout<'tcx>(
             variant.variants = Variants::Single { index };
 
             let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
-                    bug!();
-                };
+                bug!();
+            };
 
             // Now, stitch the promoted and variant-only fields back together in
             // the order they are mentioned by our GeneratorLayout.
@@ -1657,13 +749,13 @@ fn generator_layout<'tcx>(
 
             size = size.max(variant.size);
             align = align.max(variant.align);
-            Ok(tcx.intern_layout(variant))
+            Ok(variant)
         })
         .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
     size = size.align_to(align.abi);
 
-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
         Abi::Uninhabited
     } else {
         Abi::Aggregate { sized: true }