#[cfg(feature = "randomize")]
use rand::{seq::SliceRandom, SeedableRng};
#[cfg(feature = "randomize")]
use rand_xoshiro::Xoshiro128StarStar;

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
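
// A sanity check for the inversion property above. This test module is
// illustrative and not part of the original file; it only uses the private
// `invert_mapping` defined in this module.
#[cfg(test)]
mod invert_mapping_tests {
    use super::invert_mapping;

    #[test]
    fn inverts_permutations() {
        // `map` sends source field 0 to memory slot 2, field 1 to slot 0,
        // and field 2 to slot 1.
        let map = vec![2u32, 0, 1];
        let inverse = invert_mapping(&map);
        assert_eq!(inverse, vec![1, 2, 0]);
        // Inverting twice recovers the original permutation.
        assert_eq!(invert_mapping(&inverse), map);
    }
}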
pub trait LayoutCalculator {
    type TargetDataLayoutRef: Borrow<TargetDataLayout>;

    fn delay_bug(&self, txt: &str);
    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
    fn scalar_pair<V: Idx>(&self, a: Scalar, b: Scalar) -> LayoutS<V> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));
        LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
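
    // For example (illustrative): pairing `a = u8` and `b = u32` on a target where
    // `u32` is 4-byte aligned gives `b_offset == 4` (one byte of `a`, aligned up),
    // an overall size of 8, and alignment 4. If both scalars carry equally large
    // niches, the `max_by_key` above keeps `a`'s, the one at offset 0.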
    fn univariant<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        dl: &TargetDataLayout,
        fields: &[F],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Option<LayoutS<V>> {
        let pack = repr.pack;
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let effective_field_align = |f: &F| {
                if let Some(pack) = pack {
                    // Return the packed alignment in bytes.
                    f.align.abi.min(pack).bytes()
                } else {
                    // Returns log2(effective-align). This is ok since `pack` applies
                    // to all fields equally. The calculation assumes that size is an
                    // integer multiple of align, except for ZSTs.
                    //
                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
                    f.align.abi.bytes().max(f.size.bytes()).trailing_zeros() as u64
                }
            };
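
            // For example (illustrative): with no `pack`, a `[u8; 4]` field has
            // align 1 but size 4, so its key is `log2(max(1, 4)) == 2`, the same
            // class as a `u32`; sorting thus groups the array with 4-byte-aligned
            // fields instead of treating it as an align-1 straggler.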
            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
                #[cfg(feature = "randomize")]
                {
                    // `ReprOptions.layout_seed` is a deterministic seed that we can
                    // use to randomize field ordering with.
                    let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                    // Shuffle the ordering of the fields.
                    optimizing.shuffle(&mut rng);
                }
                // Otherwise we just leave things alone and actually optimize the type's fields.
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            // Then place largest alignments first, largest niches
                            // within an alignment group last.
                            let f = &fields[x as usize];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (!f.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
                        });
                    }
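
                    // Worked example for the key above (illustrative): in
                    // `struct S { a: u16, b: u32, c: u8 }` every field is a non-ZST
                    // with no niche, so the keys order `b` (align 4) before `a`
                    // (align 2) before `c` (align 1), giving offsets 0, 4, 6 and
                    // only a single byte of tail padding.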
                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        // And put the largest niche in an alignment group at the end
                        // so it can be used as discriminant in jagged enums.
                        optimizing.sort_by_key(|&x| {
                            let f = &fields[x as usize];
                            let niche_size = f.largest_niche.map_or(0, |n| n.available(dl));
                            (effective_field_align(f), niche_size)
                        });
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`.
            }
        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
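        //
        // For example (illustrative): with three fields and
        // `inverse_memory_index == [2, 0, 1]`, field 2 is laid out first and gets the
        // lowest offset; inverting at the bottom yields `memory_index == [1, 2, 0]`,
        // i.e. field 0 is second in memory.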
        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
        for &i in &inverse_memory_index {
            let field = &fields[i as usize];
            if !sized {
                self.delay_bug(&format!(
                    "univariant: field #{} comes after unsized field",
                    offsets.len(),
                ));
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl)?;
        }
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5]
        // should be 0. Since field 5 is the first element of inverse_memory_index,
        // inverting writes that element's position (0) to memory_index[5].
        // Note: if we didn't optimize, it's already right.
        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };
        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair::<V>(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => panic!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }
        Some(LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
    fn layout_of_never_type<V: Idx>(&self) -> LayoutS<V> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }
    }
    fn layout_of_struct_or_enum<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        repr: &ReprOptions,
        variants: &IndexVec<V, Vec<F>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (V, i128)>,
        niche_optimize_enum: bool,
        always_sized: bool,
    ) -> Option<LayoutS<V>> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();

        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        // A variant is absent if it's uninhabited and only has ZST fields.
        // Present uninhabited variants only require space for their fields,
        // but *not* an encoding of the discriminant (e.g., a tag value).
        // See issue #49298 for more details on the need to leave space
        // for non-ZST uninhabited data (mostly partial initialization).
        let absent = |fields: &[F]| {
            let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
            let is_zst = fields.iter().all(|f| f.is_zst());
            uninhabited && is_zst
        };
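
        // For example (illustrative): in `enum E { A(!), B(!, u32), C(u32) }`,
        // variant `A` is absent (uninhabited with only ZST fields), while `B` is
        // present but uninhabited: it still gets space for its `u32`, yet needs
        // no tag value reserved for it.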
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Some(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => V::new(0),
        };
        let is_struct = !is_enum ||
            // Only one variant is present.
            (present_second.is_none() &&
            // Representation optimizations are allowed.
            !repr.inhibit_enum_layout_opt());
        if is_struct {
            // Struct, or univariant enum equivalent to a struct.
            // (Typechecking will reject discriminant-sizing attrs.)

            let v = present_first;
            let kind = if is_enum || variants[v].is_empty() {
                StructKind::AlwaysSized
            } else {
                if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
            };

            let mut st = self.univariant(dl, &variants[v], &repr, kind)?;
            st.variants = Variants::Single { index: v };

            if is_unsafe_cell {
                let hide_niches = |scalar: &mut _| match scalar {
                    Scalar::Initialized { value, valid_range } => {
                        *valid_range = WrappingRange::full(value.size(dl))
                    }
                    // Already doesn't have any niches.
                    Scalar::Union { .. } => {}
                };
                match &mut st.abi {
                    Abi::Uninhabited => {}
                    Abi::Scalar(scalar) => hide_niches(scalar),
                    Abi::ScalarPair(a, b) => {
                        hide_niches(a);
                        hide_niches(b);
                    }
                    Abi::Vector { element, count: _ } => hide_niches(element),
                    Abi::Aggregate { sized: _ } => {}
                }
                st.largest_niche = None;
                return Some(st);
            }
            let (start, end) = scalar_valid_range;
            match st.abi {
                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                    // The asserts ensure that we are not using the
                    // `#[rustc_layout_scalar_valid_range(n)]` attribute to widen the
                    // range of anything, as that would probably result in UB somewhere.
                    // FIXME(eddyb) the asserts are probably not needed,
                    // as larger validity ranges would result in missed
                    // optimizations, *not* wrongly assuming the inner
                    // value is valid. e.g. unions enlarge validity ranges,
                    // because the values may be uninitialized.
                    if let Bound::Included(start) = start {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        let valid_range = scalar.valid_range_mut();
                        assert!(valid_range.start <= start);
                        valid_range.start = start;
                    }
                    if let Bound::Included(end) = end {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        let valid_range = scalar.valid_range_mut();
                        assert!(valid_range.end >= end);
                        valid_range.end = end;
                    }

                    // Update `largest_niche` if we have introduced a larger niche.
                    let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                    if let Some(niche) = niche {
                        match st.largest_niche {
                            Some(largest_niche) => {
                                // Replace the existing niche even if they're equal,
                                // because this one is at a lower offset.
                                if largest_niche.available(dl) <= niche.available(dl) {
                                    st.largest_niche = Some(niche);
                                }
                            }
                            None => st.largest_niche = Some(niche),
                        }
                    }
                }
                _ => assert!(
                    start == Bound::Unbounded && end == Bound::Unbounded,
                    "nonscalar layout for layout_scalar_valid_range type: {:#?}",
                    st,
                ),
            }

            return Some(st);
        }
        // At this point, we have handled all unions and
        // structs. (We have also handled univariant enums
        // that allow representation optimization.)
        assert!(is_enum);

        // Until we've decided whether to use the tagged or
        // niche filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout<V: Idx> {
            layout: LayoutS<V>,
            variants: IndexVec<V, LayoutS<V>>,
        }
        let calculate_niche_filling_layout = || -> Option<TmpLayout<V>> {
            if niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(dl, v, &repr, StructKind::AlwaysSized)?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);

                    Some(st)
                })
                .collect::<Option<IndexVec<V, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;
            let all_indices = (0..=variants.len() - 1).map(V::new);
            let needs_disc = |index: V| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap().index()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap().index();

            let count = niche_variants.size_hint().1.unwrap() as u128;
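
            // For example (illustrative): for `Option<bool>`, the largest variant
            // (`Some`) holds a `bool` whose byte only uses values 0..=1, so one of
            // the 254 spare values can be reserved and `None` is encoded as byte
            // value 2, with no separate tag at all.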
            // Find the field with the largest niche.
            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
                .iter()
                .enumerate()
                .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                .max_by_key(|(_, niche)| niche.available(dl))
                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
            let niche_offset =
                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for (j, offset) in offsets.iter_mut().enumerate() {
                            if !variants[i][j].is_zst() {
                                *offset += this_offset;
                            }
                        }
                    }
                    _ => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.abi.is_uninhabited() {
                    layout.abi = Abi::Aggregate { sized: true };
                }
                layout.size += this_offset;

                true
            });
            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;
            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                Abi::Uninhabited
            } else if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].abi {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                    Abi::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            Abi::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            Abi::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => Abi::Aggregate { sized: true },
                }
            } else {
                Abi::Aggregate { sized: true }
            };
            let layout = LayoutS {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants: (V::new(*niche_variants.start())
                            ..=V::new(*niche_variants.end())),
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: vec![niche_offset],
                    memory_index: vec![0],
                },
                abi,
                largest_niche,
                size,
                align,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };
        let niche_filling_layout = calculate_niche_filling_layout();

        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // Sign extend the raw representation to be an i128.
                val = (val << (128 - bits)) >> (128 - bits);
            }
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
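        // For example (illustrative): for a `repr(i8)` enum, `bits` is 8, so a raw
        // discriminant byte of 0xff becomes `(0xff << 120) >> 120 == -1` as an
        // `i128` after the arithmetic shift above.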
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {}...{}", min, max);
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
        let mut align = dl.aggregate_align;
        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);
        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align.abi);
                }
            }
        }
        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    dl,
                    &field_layouts,
                    &repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
                    if !field.is_zst() || field.align.abi.bytes() != 1 {
                        start_align = start_align.min(field.align.abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                Some(st)
            })
            .collect::<Option<IndexVec<V, _>>>()?;
        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        if size.bytes() >= dl.obj_size_bound() {
            return None;
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck
            // did at this point (based on the values the discriminant can take on),
            // mostly because this discriminant will be loaded and then stored into a
            // variable of the type computed by typeck. Consider such a case (a bug):
            // typeck decided on a byte-sized discriminant, but layout thinks we need
            // 16 bits to store all discriminant values. Then, in codegen, storing that
            // 16-bit discriminant into an 8-bit temporary would discard some of the
            // space needed to represent it (or layout is wrong in thinking 16 bits
            // are needed).
            panic!(
                "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                min_ity, typeck_ity
            );
            // However, it is fine to make discr type however large (as an optimisation)
            // after this point – we'll just truncate the value we load in codegen.
        }
        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. Such copying normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.
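        //
        // For example (illustrative, on a 64-bit target): in `enum E { A(u64), B }`
        // the tag only needs one byte, but `A`'s payload starts at offset 8, so
        // widening the tag to 8 bytes converts 7 bytes of padding into tag and lets
        // LLVM copy the prefix as a single aligned word.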
        // Use the initial field alignment.
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    _ => panic!(),
                }
            }
        }
        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
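        // For example (illustrative): a `repr(i8)` enum with discriminants -1 and 0
        // has `tag_mask == 0xff`, so the valid range becomes the wrapping range
        // 0xff..=0x00.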
        let mut abi = Abi::Aggregate { sized: true };

        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        } else if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = Abi::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(&*variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!();
                };
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.abi {
                    Abi::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some(pair) = common_prim {
                    // This is pretty conservative. We could go fancier
                    // by conflating things like i32 and u32, or even
                    // realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    if pair != (prim, offset) {
                        common_prim = None;
                        break;
                    }
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    scalar_unit(prim)
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair = self.scalar_pair::<V>(tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index, &[0, 1]);
                        offsets
                    }
                    _ => panic!(),
                };
                if pair_offsets[0] == Size::ZERO
                    && pair_offsets[1] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.abi;
                }
            }
        }
        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                    variant.abi = abi;
                    // Also need to bump up the size and alignment, so that the entire value fits in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
        let tagged_layout = LayoutS {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
            largest_niche,
            abi,
            align,
            size,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size = |tmp_l: &TmpLayout<V>| {
                    tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
                };
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };
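
        // For example (illustrative): for `Option<&T>` the niche-filling candidate
        // wins: it is pointer-sized (`None` uses the pointer's null niche), whereas
        // the tagged candidate needs an extra tag word alongside the pointer.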
        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            _ => panic!(),
        };

        Some(best_layout.layout)
    }
    fn layout_of_union<'a, V: Idx, F: Deref<Target = &'a LayoutS<V>> + Debug>(
        &self,
        repr: &ReprOptions,
        variants: &IndexVec<V, Vec<F>>,
    ) -> Option<LayoutS<V>> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        let optimize = !repr.inhibit_union_abi_opt();
        let mut size = Size::ZERO;
        let mut abi = Abi::Aggregate { sized: true };
        let index = V::new(0);
        for field in &variants[index] {
            assert!(field.is_sized());
            align = align.max(field.align);
            // If all non-ZST fields have the same ABI, forward this ABI.
            if optimize && !field.is_zst() {
                // Discard valid range information and allow undef.
                let field_abi = match field.abi {
                    Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                    Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
                    Abi::Vector { element: x, count } => {
                        Abi::Vector { element: x.to_union(), count }
                    }
                    Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                };
                if size == Size::ZERO {
                    // First non-ZST field: initialize `abi`.
                    abi = field_abi;
                } else if abi != field_abi {
                    // Different fields have different ABIs: reset to Aggregate.
                    abi = Abi::Aggregate { sized: true };
                }
            }
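
            // For example (illustrative): `union U { a: u32, b: u32 }` keeps a
            // forwarded `Abi::Scalar` (as a union scalar), while
            // `union V { a: u32, b: f32 }` mixes primitives and resets to
            // `Abi::Aggregate`.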
            size = cmp::max(size, field.size);
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }

        Some(LayoutS {
            variants: Variants::Single { index },
            fields: FieldsShape::Union(NonZeroUsize::new(variants[index].len())?),
            abi,
            largest_niche: None,
            align,
            size: size.align_to(align.abi),
        })
    }
}