use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::intern::Interned;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
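    ///
    /// For intuition, a small illustrative example (not from the original
    /// source): with `min = -1`, `max = 120`, and no `#[repr]` hint, both
    /// bounds fit in an `i8`, so the result is `(I8, true)`; adding
    /// `#[repr(C)]` widens the result to at least the target's
    /// `c_enum_min_size` (commonly `I32`).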
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
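/// A 4-bit log2 field can represent lane counts up to 2^15, which is why the
/// limit below is `1 << 0xF` (= 32768).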
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
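// A small illustration (added for clarity, not in the upstream source):
// `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`, since `map[0] = 2`
// places 0 in slot 2 of the inverse.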
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
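    // A hedged example of the arithmetic in `scalar_pair` below: pairing a `u8`
    // (size 1, align 1) with a `u32` (size 4, align 4) gives `b_offset = 4`
    // (size 1 rounded up to the `u32` alignment), `size = 8`, and `align = 4`.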
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
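        // A hedged illustration of the reordering above (not in the upstream
        // source): for a struct with fields `(a: u8, b: u32, c: u16)`, sorting
        // non-ZST fields by decreasing alignment lays out `b` at offset 0,
        // `c` at 4, and `a` at 6, for a total size of 8 bytes, versus 12 bytes
        // in declaration order.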

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5]
        // should be 0. Field 5 comes first in memory order, so inverting the
        // permutation recovers memory_index.
        // Note: if we didn't optimize, inverse_memory_index is already the
        // identity permutation and needs no inversion.
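        // Small illustration (not from the upstream source): if
        // inverse_memory_index = [2, 0, 1], field 2 is laid out first, and
        // inverting gives memory_index = [1, 2, 0], i.e. field 0 occupies the
        // second memory slot.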

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((
                        i,
                        &TyAndLayout {
                            layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(a), .. }, _)),
                            ..
                        },
                    )),
                    Some((
                        j,
                        &TyAndLayout {
                            layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(b), .. }, _)),
                            ..
                        },
                    )),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a, b);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

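                // For unsized pointees the pointer is "fat": e.g. `&[T]`
                // carries the slice length as a `usize`-sized integer, and
                // `&dyn Trait` carries a non-null vtable pointer (a summary of
                // the match below).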
                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let Ok(TyAndLayout {
                        layout: Layout(Interned(LayoutS { fields: FieldsShape::Array { count, .. }, .. }, _)),
                        ..
                    }) = self.layout_of(f0_ty) else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a
                // fixed maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
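                    // Illustrative example (not in the upstream source): for
                    // `union U { a: u32, b: u32 }` both fields share the same
                    // scalar ABI, so the union keeps `Abi::Scalar` (with the
                    // valid range relaxed by `to_union()`); a `u32`/`f32` mix
                    // falls back to `Abi::Aggregate`.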
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can
                    // compute the field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
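                    // E.g. `NonNull<T>` is annotated with
                    // `#[rustc_layout_scalar_valid_range_start(1)]`, which
                    // excludes 0 from the pointer scalar's valid range here.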
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range_*]` attributes
                            // to *widen* the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
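                // The canonical example is `Option<&T>`: the forbidden null
                // value of the reference encodes `None`, so the enum stays
                // pointer-sized with no separate tag.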
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar,
                                                scalar_unit(second.primitive()),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.primitive()),
                                                niche_scalar,
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
1193                     }
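                    // Worked in isolation: shifting left then arithmetic-shifting right
                    // sign-extends the low `bits` bits into the full i128. E.g. for an
                    // 8-bit discriminant (illustrative snippet, not part of this file):
                    //
                    //     let bits = 8u32;
                    //     let raw: i128 = 0xFF; // raw 8-bit pattern of -1i8
                    //     assert_eq!((raw << (128 - bits)) >> (128 - bits), -1);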
1194                     if x < min {
1195                         min = x;
1196                     }
1197                     if x > max {
1198                         max = x;
1199                     }
1200                 }
1201                 // We might have no inhabited variants, so pretend there's at least one.
1202                 if (min, max) == (i128::MAX, i128::MIN) {
1203                     min = 0;
1204                     max = 0;
1205                 }
1206                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1207                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1208
1209                 let mut align = dl.aggregate_align;
1210                 let mut size = Size::ZERO;
1211
1212                 // We're interested in the smallest alignment, so start large.
1213                 let mut start_align = Align::from_bytes(256).unwrap();
1214                 assert_eq!(Integer::for_align(dl, start_align), None);
1215
1216                 // repr(C) on an enum tells us to make a (tag, union) layout,
1217                 // so we need to grow the prefix alignment to be at least
1218                 // the alignment of the union. (This value is used both for
1219                 // determining the alignment of the overall enum, and for
1220                 // determining the alignment of the payload after the tag.)
1221                 let mut prefix_align = min_ity.align(dl).abi;
1222                 if def.repr().c() {
1223                     for fields in &variants {
1224                         for field in fields {
1225                             prefix_align = prefix_align.max(field.align.abi);
1226                         }
1227                     }
1228                 }
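                // Std-only illustration (sizes are target-dependent; shown for a
                // typical 64-bit target): with `repr(C)` the enum is (tag, union), so
                // the payload starts after the tag at the union's alignment:
                //
                //     #[repr(C)]
                //     enum WithC { A(u8), B(u64) }  // C-int tag, payload at offset 8
                //     enum Plain { A(u8), B(u64) }  // rustc-chosen tag and offsets
                //     println!("{} {}",
                //         std::mem::size_of::<WithC>(),   // typically 16
                //         std::mem::size_of::<Plain>());  // typically 16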
1229
1230                 // Create the set of structs that represent each variant.
1231                 let mut layout_variants = variants
1232                     .iter_enumerated()
1233                     .map(|(i, field_layouts)| {
1234                         let mut st = self.univariant_uninterned(
1235                             ty,
1236                             &field_layouts,
1237                             &def.repr(),
1238                             StructKind::Prefixed(min_ity.size(), prefix_align),
1239                         )?;
1240                         st.variants = Variants::Single { index: i };
1241                         // Find the first field we can't move later
1242                         // to make room for a larger discriminant.
1243                         for field in
1244                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1245                         {
1246                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1247                                 start_align = start_align.min(field.align.abi);
1248                                 break;
1249                             }
1250                         }
1251                         size = cmp::max(size, st.size);
1252                         align = align.max(st.align);
1253                         Ok(st)
1254                     })
1255                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1256
1257                 // Align the maximum variant size to the largest alignment.
1258                 size = size.align_to(align.abi);
1259
1260                 if size.bytes() >= dl.obj_size_bound() {
1261                     return Err(LayoutError::SizeOverflow(ty));
1262                 }
1263
1264                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1265                 if typeck_ity < min_ity {
1266                     // It is a bug if layout decided on a larger discriminant size than
1267                     // typeck did at this point (based on the values the discriminant can
1268                     // take on), because the discriminant will be loaded and then stored
1269                     // into a variable of the type computed by typeck. Consider the buggy
1270                     // case: typeck decided on a byte-sized discriminant, but layout
1271                     // thinks it needs 16 bits to store all the discriminant values. Then,
1272                     // in codegen, storing that 16-bit discriminant into an 8-bit temporary
1273                     // would have to discard some of the bits needed to represent it (or
1274                     // layout is wrong in thinking it needs 16 bits).
1275                     bug!(
1276                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1277                         min_ity,
1278                         typeck_ity
1279                     );
1280                     // However, it is fine to make the discr type arbitrarily large (as an
1281                     // optimisation) after this point – we'll just truncate the value we load in codegen.
1282                 }
1283
1284                 // Check to see if we should use a different type for the
1285                 // discriminant. We can safely use a type with the same size
1286                 // as the alignment of the first field of each variant.
1287                 // We increase the size of the discriminant to avoid LLVM copying
1288                 // padding when it doesn't need to; that padding copying normally causes
1289                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1290                 // bigger integer size, LLVM can be sure about its contents and
1291                 // won't be so conservative.
1292
1293                 // Use the initial field alignment
1294                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1295                     min_ity
1296                 } else {
1297                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1298                 };
1299
1300                 // If the alignment-derived integer is not larger than the chosen
1301                 // discriminant size, don't use the alignment as the final size.
1302                 if ity <= min_ity {
1303                     ity = min_ity;
1304                 } else {
1305                     // Patch up the variants' first few fields.
1306                     let old_ity_size = min_ity.size();
1307                     let new_ity_size = ity.size();
1308                     for variant in &mut layout_variants {
1309                         match variant.fields {
1310                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1311                                 for i in offsets {
1312                                     if *i <= old_ity_size {
1313                                         assert_eq!(*i, old_ity_size);
1314                                         *i = new_ity_size;
1315                                     }
1316                                 }
1317                                 // We might be making the struct larger.
1318                                 if variant.size <= old_ity_size {
1319                                     variant.size = new_ity_size;
1320                                 }
1321                             }
1322                             _ => bug!(),
1323                         }
1324                     }
1325                 }
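                // Worked example (illustrative): if `min_ity` is I8 but the chosen
                // `ity` is I32, a field that was placed directly behind the old 1-byte
                // tag (offset == 1) is bumped to offset 4, and an empty variant whose
                // size was just the 1-byte tag grows to 4 bytes; fields already past
                // the new tag keep their offsets.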
1326
1327                 let tag_mask = ity.size().unsigned_int_max();
1328                 let tag = Scalar::Initialized {
1329                     value: Int(ity, signed),
1330                     valid_range: WrappingRange {
1331                         start: (min as u128 & tag_mask),
1332                         end: (max as u128 & tag_mask),
1333                     },
1334                 };
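                // Worked example: for discriminants -1..=0 stored in a 1-byte tag, the
                // valid range wraps around zero. Masking in isolation (illustrative):
                //
                //     let (min, max) = (-1i128, 0i128);
                //     let tag_mask = u8::MAX as u128; // ity = I8
                //     assert_eq!(min as u128 & tag_mask, 0xff); // start (two's complement)
                //     assert_eq!(max as u128 & tag_mask, 0x00); // end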
1335                 let mut abi = Abi::Aggregate { sized: true };
1336
1337                 // Without the latter check, aligned enums with custom discriminant values
1338                 // would result in an ICE; see issue #92464 for more info.
1339                 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1340                     abi = Abi::Scalar(tag);
1341                 } else {
1342                     // Try to use a ScalarPair for all tagged enums.
1343                     let mut common_prim = None;
1344                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1345                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1346                             bug!();
1347                         };
1348                         let mut fields =
1349                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1350                         let (field, offset) = match (fields.next(), fields.next()) {
1351                             (None, None) => continue,
1352                             (Some(pair), None) => pair,
1353                             _ => {
1354                                 common_prim = None;
1355                                 break;
1356                             }
1357                         };
1358                         let prim = match field.abi {
1359                             Abi::Scalar(scalar) => scalar.primitive(),
1360                             _ => {
1361                                 common_prim = None;
1362                                 break;
1363                             }
1364                         };
1365                         if let Some(pair) = common_prim {
1366                             // This is pretty conservative. We could go fancier
1367                             // by conflating things like i32 and u32, or even
1368                             // realising that (u8, u8) could just cohabit with
1369                             // u16 or even u32.
1370                             if pair != (prim, offset) {
1371                                 common_prim = None;
1372                                 break;
1373                             }
1374                         } else {
1375                             common_prim = Some((prim, offset));
1376                         }
1377                     }
1378                     if let Some((prim, offset)) = common_prim {
1379                         let pair = self.scalar_pair(tag, scalar_unit(prim));
1380                         let pair_offsets = match pair.fields {
1381                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1382                                 assert_eq!(memory_index, &[0, 1]);
1383                                 offsets
1384                             }
1385                             _ => bug!(),
1386                         };
1387                         if pair_offsets[0] == Size::ZERO
1388                             && pair_offsets[1] == *offset
1389                             && align == pair.align
1390                             && size == pair.size
1391                         {
1392                             // We can use `ScalarPair` only when it matches our
1393                             // already computed layout (including `#[repr(C)]`).
1394                             abi = pair.abi;
1395                         }
1396                     }
1397                 }
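                // Std-only aside: when the ScalarPair attempt succeeds, the enum is
                // effectively (tag, value) with no extra padding. E.g. on current
                // rustc (observed behaviour, not a guarantee):
                //
                //     println!("{}", std::mem::size_of::<Result<u32, u32>>()); // 8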
1398
1399                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1400                     abi = Abi::Uninhabited;
1401                 }
1402
1403                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1404
1405                 let layout_variants =
1406                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1407
1408                 let tagged_layout = LayoutS {
1409                     variants: Variants::Multiple {
1410                         tag,
1411                         tag_encoding: TagEncoding::Direct,
1412                         tag_field: 0,
1413                         variants: layout_variants,
1414                     },
1415                     fields: FieldsShape::Arbitrary {
1416                         offsets: vec![Size::ZERO],
1417                         memory_index: vec![0],
1418                     },
1419                     largest_niche,
1420                     abi,
1421                     align,
1422                     size,
1423                 };
1424
1425                 let best_layout = match (tagged_layout, niche_filling_layout) {
1426                     (tagged_layout, Some(niche_filling_layout)) => {
1427                         // Pick the smaller layout; otherwise,
1428                         // pick the layout with the larger niche; otherwise,
1429                         // pick tagged as it has simpler codegen.
1430                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1431                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1432                             (layout.size, cmp::Reverse(niche_size))
1433                         })
1434                     }
1435                     (tagged_layout, None) => tagged_layout,
1436                 };
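                // The tie-break above, in isolation: `cmp::min_by_key` with key
                // `(size, Reverse(niche_size))` prefers the smaller layout, then the
                // one with the larger remaining niche, then the first argument (the
                // tagged layout, which has simpler codegen) on a full tie:
                //
                //     use std::cmp::{self, Reverse};
                //     let tagged = ("tagged", 16u64, 0u128); // (name, size, niche)
                //     let niched = ("niched", 8u64, 254u128);
                //     let best = cmp::min_by_key(tagged, niched, |&(_, size, niche)| {
                //         (size, Reverse(niche))
                //     });
                //     assert_eq!(best.0, "niched");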
1437
1438                 tcx.intern_layout(best_layout)
1439             }
1440
1441             // Types with no meaningful known layout.
1442             ty::Projection(_) | ty::Opaque(..) => {
1443                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1444                 // if that was possible, so there's no reason to try again here.
1445                 return Err(LayoutError::Unknown(ty));
1446             }
1447
1448             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1449                 bug!("Layout::compute: unexpected type `{}`", ty)
1450             }
1451
1452             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1453                 return Err(LayoutError::Unknown(ty));
1454             }
1455         })
1456     }
1457 }
1458
1459 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1460 #[derive(Clone, Debug, PartialEq)]
1461 enum SavedLocalEligibility {
1462     Unassigned,
1463     Assigned(VariantIdx),
1464     // FIXME: Use newtype_index so we aren't wasting bytes
1465     Ineligible(Option<u32>),
1466 }
1467
1468 // When laying out generators, we divide our saved local fields into two
1469 // categories: overlap-eligible and overlap-ineligible.
1470 //
1471 // Those fields which are ineligible for overlap go in a "prefix" at the
1472 // beginning of the layout, and always have space reserved for them.
1473 //
1474 // Overlap-eligible fields are only assigned to one variant, so we lay
1475 // those fields out for each variant and put them right after the
1476 // prefix.
1477 //
1478 // Finally, in the layout details, we point to the fields from the
1479 // variants they are assigned to. It is possible for some fields to be
1480 // included in multiple variants. No field ever "moves around" in the
1481 // layout; its offset is always the same.
1482 //
1483 // Also included in the layout are the upvars and the discriminant.
1484 // These are included as fields on the "outer" layout; they are not part
1485 // of any variant.
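// A surface-level, std-only illustration (not part of this file): a value that
// is live across an `.await` is a saved local of the compiler-generated
// generator, so it has to be stored in the future's state and shows up in the
// future's size:
//
//     async fn tick() {}
//     async fn holds_buffer() -> u8 {
//         let buf = [0u8; 64]; // live across the await => saved local
//         tick().await;
//         buf[0]
//     }
//     // At least 64 bytes, plus the discriminant and any other state:
//     println!("{}", std::mem::size_of_val(&holds_buffer()));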
1486 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1487     /// Compute the eligibility and assignment of each local.
1488     fn generator_saved_local_eligibility(
1489         &self,
1490         info: &GeneratorLayout<'tcx>,
1491     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1492         use SavedLocalEligibility::*;
1493
1494         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1495             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1496
1497         // The saved locals not eligible for overlap. These will get
1498         // "promoted" to the prefix of our generator.
1499         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1500
1501         // Figure out which of our saved locals are fields in only
1502         // one variant. The rest are deemed ineligible for overlap.
1503         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1504             for local in fields {
1505                 match assignments[*local] {
1506                     Unassigned => {
1507                         assignments[*local] = Assigned(variant_index);
1508                     }
1509                     Assigned(idx) => {
1510                         // We've already seen this local at another suspension
1511                         // point, so it is no longer a candidate.
1512                         trace!(
1513                             "removing local {:?} in >1 variant ({:?}, {:?})",
1514                             local,
1515                             variant_index,
1516                             idx
1517                         );
1518                         ineligible_locals.insert(*local);
1519                         assignments[*local] = Ineligible(None);
1520                     }
1521                     Ineligible(_) => {}
1522                 }
1523             }
1524         }
1525
1526         // Next, check every pair of eligible locals to see if they
1527         // conflict.
1528         for local_a in info.storage_conflicts.rows() {
1529             let conflicts_a = info.storage_conflicts.count(local_a);
1530             if ineligible_locals.contains(local_a) {
1531                 continue;
1532             }
1533
1534             for local_b in info.storage_conflicts.iter(local_a) {
1535                 // local_a and local_b are storage live at the same time, therefore they
1536                 // cannot overlap in the generator layout. The only way to guarantee
1537                 // this is if they are in the same variant, or one is ineligible
1538                 // (which means it is stored in every variant).
1539                 if ineligible_locals.contains(local_b)
1540                     || assignments[local_a] == assignments[local_b]
1541                 {
1542                     continue;
1543                 }
1544
1545                 // If they conflict, we will choose one to make ineligible.
1546                 // This is not always optimal; it's just a greedy heuristic that
1547                 // seems to produce good results most of the time.
1548                 let conflicts_b = info.storage_conflicts.count(local_b);
1549                 let (remove, other) =
1550                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1551                 ineligible_locals.insert(remove);
1552                 assignments[remove] = Ineligible(None);
1553                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1554             }
1555         }
1556
1557         // Count the number of variants in use. If only one is used, then it is
1558         // impossible to overlap any locals in our layout. In this case it's
1559         // always better to make the remaining locals ineligible, so we can
1560         // lay them out with the other locals in the prefix and eliminate
1561         // unnecessary padding bytes.
1562         {
1563             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1564             for assignment in &assignments {
1565                 if let Assigned(idx) = assignment {
1566                     used_variants.insert(*idx);
1567                 }
1568             }
1569             if used_variants.count() < 2 {
1570                 for assignment in assignments.iter_mut() {
1571                     *assignment = Ineligible(None);
1572                 }
1573                 ineligible_locals.insert_all();
1574             }
1575         }
1576
1577         // Write down the order of our locals that will be promoted to the prefix.
1578         {
1579             for (idx, local) in ineligible_locals.iter().enumerate() {
1580                 assignments[local] = Ineligible(Some(idx as u32));
1581             }
1582         }
1583         debug!("generator saved local assignments: {:?}", assignments);
1584
1585         (ineligible_locals, assignments)
1586     }
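    // A minimal sketch of the greedy rule above, over a plain adjacency list
    // (hypothetical helper, not the compiler's bitset-based implementation; the
    // real code also keeps pairs that are assigned to the same variant):
    //
    //     fn evict_order(conflicts: &[Vec<usize>]) -> Vec<bool> {
    //         let n = conflicts.len();
    //         let mut ineligible = vec![false; n];
    //         for a in 0..n {
    //             if ineligible[a] { continue; }
    //             for &b in &conflicts[a] {
    //                 if ineligible[b] { continue; }
    //                 // Evict whichever of the pair conflicts with more locals,
    //                 // hoping the survivors can overlap more aggressively.
    //                 let evict = if conflicts[a].len() > conflicts[b].len() { a } else { b };
    //                 ineligible[evict] = true;
    //             }
    //         }
    //         ineligible
    //     }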
1587
1588     /// Compute the full generator layout.
1589     fn generator_layout(
1590         &self,
1591         ty: Ty<'tcx>,
1592         def_id: hir::def_id::DefId,
1593         substs: SubstsRef<'tcx>,
1594     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1595         use SavedLocalEligibility::*;
1596         let tcx = self.tcx;
1597         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1598
1599         let Some(info) = tcx.generator_layout(def_id) else {
1600             return Err(LayoutError::Unknown(ty));
1601         };
1602         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1603
1604         // Build a prefix layout, including "promoting" all ineligible
1605         // locals as part of the prefix. We compute the layout of all of
1606         // these fields at once to get optimal packing.
1607         let tag_index = substs.as_generator().prefix_tys().count();
1608
1609         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1610         let max_discr = (info.variant_fields.len() - 1) as u128;
1611         let discr_int = Integer::fit_unsigned(max_discr);
1612         let discr_int_ty = discr_int.to_ty(tcx, false);
1613         let tag = Scalar::Initialized {
1614             value: Primitive::Int(discr_int, false),
1615             valid_range: WrappingRange { start: 0, end: max_discr },
1616         };
1617         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1618         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1619
1620         let promoted_layouts = ineligible_locals
1621             .iter()
1622             .map(|local| subst_field(info.field_tys[local]))
1623             .map(|ty| tcx.mk_maybe_uninit(ty))
1624             .map(|ty| self.layout_of(ty));
1625         let prefix_layouts = substs
1626             .as_generator()
1627             .prefix_tys()
1628             .map(|ty| self.layout_of(ty))
1629             .chain(iter::once(Ok(tag_layout)))
1630             .chain(promoted_layouts)
1631             .collect::<Result<Vec<_>, _>>()?;
1632         let prefix = self.univariant_uninterned(
1633             ty,
1634             &prefix_layouts,
1635             &ReprOptions::default(),
1636             StructKind::AlwaysSized,
1637         )?;
1638
1639         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1640
1641         // Split the prefix layout into the "outer" fields (upvars and
1642         // discriminant) and the "promoted" fields. Promoted fields will
1643         // get included in each variant that requested them in
1644         // GeneratorLayout.
1645         debug!("prefix = {:#?}", prefix);
1646         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1647             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1648                 let mut inverse_memory_index = invert_mapping(&memory_index);
1649
1650                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1651                 // "outer" and "promoted" fields respectively.
1652                 let b_start = (tag_index + 1) as u32;
1653                 let offsets_b = offsets.split_off(b_start as usize);
1654                 let offsets_a = offsets;
1655
1656                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1657                 // by preserving the order but keeping only one disjoint "half" each.
1658                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1659                 let inverse_memory_index_b: Vec<_> =
1660                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1661                 inverse_memory_index.retain(|&i| i < b_start);
1662                 let inverse_memory_index_a = inverse_memory_index;
1663
1664                 // Since `inverse_memory_index_{a,b}` each only refer to their
1665                 // respective fields, they can be safely inverted
1666                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1667                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1668
1669                 let outer_fields =
1670                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1671                 (outer_fields, offsets_b, memory_index_b)
1672             }
1673             _ => bug!(),
1674         };
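        // In isolation: `invert_mapping` inverts a permutation, and the code
        // above splits one permutation into two disjoint ones before inverting
        // each half (illustrative re-implementation, not the real helper):
        //
        //     fn invert(m: &[u32]) -> Vec<u32> {
        //         let mut inv = vec![0u32; m.len()];
        //         for (src, &dst) in m.iter().enumerate() {
        //             inv[dst as usize] = src as u32;
        //         }
        //         inv
        //     }
        //     // memory_index [2, 0, 1] means field 0 is 3rd in memory, etc.
        //     assert_eq!(invert(&[2, 0, 1]), vec![1, 2, 0]);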
1675
1676         let mut size = prefix.size;
1677         let mut align = prefix.align;
1678         let variants = info
1679             .variant_fields
1680             .iter_enumerated()
1681             .map(|(index, variant_fields)| {
1682                 // Only include overlap-eligible fields when we compute our variant layout.
1683                 let variant_only_tys = variant_fields
1684                     .iter()
1685                     .filter(|local| match assignments[**local] {
1686                         Unassigned => bug!(),
1687                         Assigned(v) if v == index => true,
1688                         Assigned(_) => bug!("assignment does not match variant"),
1689                         Ineligible(_) => false,
1690                     })
1691                     .map(|local| subst_field(info.field_tys[*local]));
1692
1693                 let mut variant = self.univariant_uninterned(
1694                     ty,
1695                     &variant_only_tys
1696                         .map(|ty| self.layout_of(ty))
1697                         .collect::<Result<Vec<_>, _>>()?,
1698                     &ReprOptions::default(),
1699                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1700                 )?;
1701                 variant.variants = Variants::Single { index };
1702
1703                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1704                     bug!();
1705                 };
1706
1707                 // Now, stitch the promoted and variant-only fields back together in
1708                 // the order they are mentioned by our GeneratorLayout.
1709                 // Because we only use some subset (that can differ between variants)
1710                 // of the promoted fields, we can't just pick those elements of the
1711                 // `promoted_memory_index` (as we'd end up with gaps).
1712                 // So instead, we build an "inverse memory_index", as if all of the
1713                 // promoted fields were being used, but leave the elements not in the
1714                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1715                 // obtain a valid (bijective) mapping.
1716                 const INVALID_FIELD_IDX: u32 = !0;
1717                 let mut combined_inverse_memory_index =
1718                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1719                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1720                 let combined_offsets = variant_fields
1721                     .iter()
1722                     .enumerate()
1723                     .map(|(i, local)| {
1724                         let (offset, memory_index) = match assignments[*local] {
1725                             Unassigned => bug!(),
1726                             Assigned(_) => {
1727                                 let (offset, memory_index) =
1728                                     offsets_and_memory_index.next().unwrap();
1729                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1730                             }
1731                             Ineligible(field_idx) => {
1732                                 let field_idx = field_idx.unwrap() as usize;
1733                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1734                             }
1735                         };
1736                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1737                         offset
1738                     })
1739                     .collect();
1740
1741                 // Remove the unused slots and invert the mapping to obtain the
1742                 // combined `memory_index` (also see previous comment).
1743                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1744                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1745
1746                 variant.fields = FieldsShape::Arbitrary {
1747                     offsets: combined_offsets,
1748                     memory_index: combined_memory_index,
1749                 };
1750
1751                 size = size.max(variant.size);
1752                 align = align.max(variant.align);
1753                 Ok(tcx.intern_layout(variant))
1754             })
1755             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
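        // Worked example of the INVALID_FIELD_IDX trick above: with three
        // memory positions but a variant using only one of them, entries are
        // written by memory position and the unused slots stay at `!0`; the
        // `retain` then compacts the vector back into a dense permutation:
        //
        //     let mut inv = vec![u32::MAX; 3];
        //     inv[1] = 0; // this variant's field 0 sits at memory position 1
        //     inv.retain(|&i| i != u32::MAX);
        //     assert_eq!(inv, vec![0]); // dense again, safe to invert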
1756
1757         size = size.align_to(align.abi);
1758
1759         let abi =
1760             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1761                 Abi::Uninhabited
1762             } else {
1763                 Abi::Aggregate { sized: true }
1764             };
1765
1766         let layout = tcx.intern_layout(LayoutS {
1767             variants: Variants::Multiple {
1768                 tag,
1769                 tag_encoding: TagEncoding::Direct,
1770                 tag_field: tag_index,
1771                 variants,
1772             },
1773             fields: outer_fields,
1774             abi,
1775             largest_niche: prefix.largest_niche,
1776             size,
1777             align,
1778         });
1779         debug!("generator layout ({:?}): {:#?}", ty, layout);
1780         Ok(layout)
1781     }
1782
1783     /// This is invoked by the `layout_of` query to record the final
1784     /// layout of each type.
1785     #[inline(always)]
1786     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1787         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1788         // for dumping later.
1789         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1790             self.record_layout_for_printing_outlined(layout)
1791         }
1792     }
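    // Only active under the nightly `-Zprint-type-sizes` flag, e.g.:
    //
    //     $ RUSTFLAGS=-Zprint-type-sizes cargo +nightly build
    //     print-type-size type: `std::option::Option<&u8>`: 8 bytes, alignment: 8 bytes
    //
    // (output shape shown from memory; exact lines vary by compiler version)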
1793
1794     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1795         // Ignore layouts computed with non-empty param environments, and
1796         // non-monomorphic layouts, as the user only wants to see the layouts
1797         // resulting from the final codegen session.
1798         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1799             return;
1800         }
1801
1802         // (delay format until we actually need it)
1803         let record = |kind, packed, opt_discr_size, variants| {
1804             let type_desc = format!("{:?}", layout.ty);
1805             self.tcx.sess.code_stats.record_type_size(
1806                 kind,
1807                 type_desc,
1808                 layout.align.abi,
1809                 layout.size,
1810                 packed,
1811                 opt_discr_size,
1812                 variants,
1813             );
1814         };
1815
1816         let adt_def = match *layout.ty.kind() {
1817             ty::Adt(ref adt_def, _) => {
1818                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1819                 adt_def
1820             }
1821
1822             ty::Closure(..) => {
1823                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1824                 record(DataTypeKind::Closure, false, None, vec![]);
1825                 return;
1826             }
1827
1828             _ => {
1829                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1830                 return;
1831             }
1832         };
1833
1834         let adt_kind = adt_def.adt_kind();
1835         let adt_packed = adt_def.repr().pack.is_some();
1836
1837         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1838             let mut min_size = Size::ZERO;
1839             let field_info: Vec<_> = flds
1840                 .iter()
1841                 .enumerate()
1842                 .map(|(i, &name)| {
1843                     let field_layout = layout.field(self, i);
1844                     let offset = layout.fields.offset(i);
1845                     let field_end = offset + field_layout.size;
1846                     if min_size < field_end {
1847                         min_size = field_end;
1848                     }
1849                     FieldInfo {
1850                         name: name.to_string(),
1851                         offset: offset.bytes(),
1852                         size: field_layout.size.bytes(),
1853                         align: field_layout.align.abi.bytes(),
1854                     }
1855                 })
1856                 .collect();
1857
1858             VariantInfo {
1859                 name: n.map(|n| n.to_string()),
1860                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1861                 align: layout.align.abi.bytes(),
1862                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1863                 fields: field_info,
1864             }
1865         };
1866
1867         match layout.variants {
1868             Variants::Single { index } => {
1869                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1870                     debug!(
1871                         "print-type-size `{:#?}` variant {}",
1872                         layout,
1873                         adt_def.variant(index).name
1874                     );
1875                     let variant_def = &adt_def.variant(index);
1876                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1877                     record(
1878                         adt_kind.into(),
1879                         adt_packed,
1880                         None,
1881                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1882                     );
1883                 } else {
1884                     // (This case arises for *empty* enums; so give it
1885                     // zero variants.)
1886                     record(adt_kind.into(), adt_packed, None, vec![]);
1887                 }
1888             }
1889
1890             Variants::Multiple { tag, ref tag_encoding, .. } => {
1891                 debug!(
1892                     "print-type-size `{:#?}` adt general variants def {}",
1893                     layout.ty,
1894                     adt_def.variants().len()
1895                 );
1896                 let variant_infos: Vec<_> = adt_def
1897                     .variants()
1898                     .iter_enumerated()
1899                     .map(|(i, variant_def)| {
1900                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1901                         build_variant_info(
1902                             Some(variant_def.name),
1903                             &fields,
1904                             layout.for_variant(self, i),
1905                         )
1906                     })
1907                     .collect();
1908                 record(
1909                     adt_kind.into(),
1910                     adt_packed,
1911                     match tag_encoding {
1912                         TagEncoding::Direct => Some(tag.size(self)),
1913                         _ => None,
1914                     },
1915                     variant_infos,
1916                 );
1917             }
1918         }
1919     }
1920 }
1921
1922 /// Type size "skeleton", i.e., the only information determining a type's size.
1923 /// While this is conservative (aside from constant sizes, only pointers,
1924 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1925 /// enough to statically check common use cases of `transmute`.
1926 #[derive(Copy, Clone, Debug)]
1927 pub enum SizeSkeleton<'tcx> {
1928     /// Any statically computable Layout.
1929     Known(Size),
1930
1931     /// A potentially-fat pointer.
1932     Pointer {
1933         /// If true, this pointer is never null.
1934         non_zero: bool,
1935         /// The type which determines the unsized metadata, if any,
1936         /// of this pointer. Either a type parameter or a projection
1937         /// depending on one, with regions erased.
1938         tail: Ty<'tcx>,
1939     },
1940 }
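// Illustrative: matching skeletons are what let `transmute` type-check in
// generic code where neither size is a known constant. Both sides below are
// `Pointer { tail: T }` skeletons, `Option<&T>` via the nullable-pointer case
// (a sketch of the intended use, assuming current transmute rules):
//
//     unsafe fn erase<T: ?Sized>(r: &T) -> Option<&T> {
//         std::mem::transmute(r)
//     }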
1941
1942 impl<'tcx> SizeSkeleton<'tcx> {
1943     pub fn compute(
1944         ty: Ty<'tcx>,
1945         tcx: TyCtxt<'tcx>,
1946         param_env: ty::ParamEnv<'tcx>,
1947     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1948         debug_assert!(!ty.has_infer_types_or_consts());
1949
1950         // First try computing a static layout.
1951         let err = match tcx.layout_of(param_env.and(ty)) {
1952             Ok(layout) => {
1953                 return Ok(SizeSkeleton::Known(layout.size));
1954             }
1955             Err(err) => err,
1956         };
1957
1958         match *ty.kind() {
1959             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1960                 let non_zero = !ty.is_unsafe_ptr();
1961                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1962                 match tail.kind() {
1963                     ty::Param(_) | ty::Projection(_) => {
1964                         debug_assert!(tail.has_param_types_or_consts());
1965                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1966                     }
1967                     _ => bug!(
1968                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1969                               tail `{}` is not a type parameter or a projection",
1970                         ty,
1971                         err,
1972                         tail
1973                     ),
1974                 }
1975             }
1976
1977             ty::Adt(def, substs) => {
1978                 // Only newtypes and enums w/ nullable pointer optimization.
1979                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1980                     return Err(err);
1981                 }
1982
1983                 // Get a zero-sized variant or a pointer newtype.
1984                 let zero_or_ptr_variant = |i| {
1985                     let i = VariantIdx::new(i);
1986                     let fields =
1987                         def.variant(i).fields.iter().map(|field| {
1988                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1989                         });
1990                     let mut ptr = None;
1991                     for field in fields {
1992                         let field = field?;
1993                         match field {
1994                             SizeSkeleton::Known(size) => {
1995                                 if size.bytes() > 0 {
1996                                     return Err(err);
1997                                 }
1998                             }
1999                             SizeSkeleton::Pointer { .. } => {
2000                                 if ptr.is_some() {
2001                                     return Err(err);
2002                                 }
2003                                 ptr = Some(field);
2004                             }
2005                         }
2006                     }
2007                     Ok(ptr)
2008                 };
2009
2010                 let v0 = zero_or_ptr_variant(0)?;
2011                 // Newtype.
2012                 if def.variants().len() == 1 {
2013                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2014                         return Ok(SizeSkeleton::Pointer {
2015                             non_zero: non_zero
2016                                 || match tcx.layout_scalar_valid_range(def.did()) {
2017                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2018                                     (Bound::Included(start), Bound::Included(end)) => {
2019                                         0 < start && start < end
2020                                     }
2021                                     _ => false,
2022                                 },
2023                             tail,
2024                         });
2025                     } else {
2026                         return Err(err);
2027                     }
2028                 }
2029
2030                 let v1 = zero_or_ptr_variant(1)?;
2031                 // Nullable pointer enum optimization.
2032                 match (v0, v1) {
2033                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2034                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2035                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2036                     }
2037                     _ => Err(err),
2038                 }
2039             }
2040
2041             ty::Projection(_) | ty::Opaque(..) => {
2042                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2043                 if ty == normalized {
2044                     Err(err)
2045                 } else {
2046                     SizeSkeleton::compute(normalized, tcx, param_env)
2047                 }
2048             }
2049
2050             _ => Err(err),
2051         }
2052     }
2053
2054     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2055         match (self, other) {
2056             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2057             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2058                 a == b
2059             }
2060             _ => false,
2061         }
2062     }
2063 }
2064
2065 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2066     fn tcx(&self) -> TyCtxt<'tcx>;
2067 }
2068
2069 pub trait HasParamEnv<'tcx> {
2070     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2071 }
2072
2073 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2074     #[inline]
2075     fn data_layout(&self) -> &TargetDataLayout {
2076         &self.data_layout
2077     }
2078 }
2079
2080 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2081     fn target_spec(&self) -> &Target {
2082         &self.sess.target
2083     }
2084 }
2085
2086 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2087     #[inline]
2088     fn tcx(&self) -> TyCtxt<'tcx> {
2089         *self
2090     }
2091 }
2092
2093 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2094     #[inline]
2095     fn data_layout(&self) -> &TargetDataLayout {
2096         &self.data_layout
2097     }
2098 }
2099
2100 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2101     fn target_spec(&self) -> &Target {
2102         &self.sess.target
2103     }
2104 }
2105
2106 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2107     #[inline]
2108     fn tcx(&self) -> TyCtxt<'tcx> {
2109         **self
2110     }
2111 }
2112
2113 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2114     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2115         self.param_env
2116     }
2117 }
2118
2119 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2120     fn data_layout(&self) -> &TargetDataLayout {
2121         self.tcx.data_layout()
2122     }
2123 }
2124
2125 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2126     fn target_spec(&self) -> &Target {
2127         self.tcx.target_spec()
2128     }
2129 }
2130
2131 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2132     fn tcx(&self) -> TyCtxt<'tcx> {
2133         self.tcx.tcx()
2134     }
2135 }
2136
2137 pub trait MaybeResult<T> {
2138     type Error;
2139
2140     fn from(x: Result<T, Self::Error>) -> Self;
2141     fn to_result(self) -> Result<T, Self::Error>;
2142 }
2143
2144 impl<T> MaybeResult<T> for T {
2145     type Error = !;
2146
2147     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2148         x
2149     }
2150     fn to_result(self) -> Result<T, Self::Error> {
2151         Ok(self)
2152     }
2153 }
2154
2155 impl<T, E> MaybeResult<T> for Result<T, E> {
2156     type Error = E;
2157
2158     fn from(x: Result<T, Self::Error>) -> Self {
2159         x
2160     }
2161     fn to_result(self) -> Result<T, Self::Error> {
2162         self
2163     }
2164 }
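// A standalone sketch of the `MaybeResult` pattern, with `Infallible` standing
// in for the unstable `!` type (hypothetical names, same shape as above):
//
//     use std::convert::Infallible;
//     trait Maybe<T>: Sized {
//         type Error;
//         fn from_result(r: Result<T, Self::Error>) -> Self;
//     }
//     impl<T> Maybe<T> for T {
//         type Error = Infallible;
//         fn from_result(r: Result<T, Infallible>) -> T {
//             match r { Ok(x) => x, Err(e) => match e {} }
//         }
//     }
//     impl<T, E> Maybe<T> for Result<T, E> {
//         type Error = E;
//         fn from_result(r: Result<T, E>) -> Self { r }
//     }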
2165
2166 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2167
2168 /// Trait for contexts that want to be able to compute layouts of types.
2169 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2170 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2171     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2172     /// returned from `layout_of` (see also `handle_layout_err`).
2173     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2174
2175     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2176     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2177     #[inline]
2178     fn layout_tcx_at_span(&self) -> Span {
2179         DUMMY_SP
2180     }
2181
2182     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2183     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2184     ///
2185     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2186     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2187     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2188     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2189     fn handle_layout_err(
2190         &self,
2191         err: LayoutError<'tcx>,
2192         span: Span,
2193         ty: Ty<'tcx>,
2194     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2195 }
2196
2197 /// Blanket extension trait for contexts that can compute layouts of types.
2198 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2199     /// Computes the layout of a type. Note that this implicitly
2200     /// executes in "reveal all" mode, and will normalize the input type.
2201     #[inline]
2202     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2203         self.spanned_layout_of(ty, DUMMY_SP)
2204     }
2205
2206     /// Computes the layout of a type, at `span`. Note that this implicitly
2207     /// executes in "reveal all" mode, and will normalize the input type.
2208     // FIXME(eddyb) avoid passing information like this, and instead add more
2209     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2210     #[inline]
2211     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2212         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2213         let tcx = self.tcx().at(span);
2214
2215         MaybeResult::from(
2216             tcx.layout_of(self.param_env().and(ty))
2217                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2218         )
2219     }
2220 }
2221
2222 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2223
2224 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2225     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2226
2227     #[inline]
2228     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2229         err
2230     }
2231 }
2232
2233 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2234     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2235
2236     #[inline]
2237     fn layout_tcx_at_span(&self) -> Span {
2238         self.tcx.span
2239     }
2240
2241     #[inline]
2242     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2243         err
2244     }
2245 }
2246
2247 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2248 where
2249     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2250 {
2251     fn ty_and_layout_for_variant(
2252         this: TyAndLayout<'tcx>,
2253         cx: &C,
2254         variant_index: VariantIdx,
2255     ) -> TyAndLayout<'tcx> {
2256         let layout = match this.variants {
2257             Variants::Single { index }
2258                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2259                 if index == variant_index &&
2260                 // Don't confuse variants of uninhabited enums with the enum itself.
2261                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2262                 this.fields != FieldsShape::Primitive =>
2263             {
2264                 this.layout
2265             }
2266
2267             Variants::Single { index } => {
2268                 let tcx = cx.tcx();
2269                 let param_env = cx.param_env();
2270
2271                 // Deny calling for_variant more than once for non-Single enums.
2272                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2273                     assert_eq!(original_layout.variants, Variants::Single { index });
2274                 }
2275
2276                 let fields = match this.ty.kind() {
2277                     ty::Adt(def, _) if def.variants().is_empty() =>
2278                         bug!("for_variant called on zero-variant enum"),
2279                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2280                     _ => bug!(),
2281                 };
2282                 tcx.intern_layout(LayoutS {
2283                     variants: Variants::Single { index: variant_index },
2284                     fields: match NonZeroUsize::new(fields) {
2285                         Some(fields) => FieldsShape::Union(fields),
2286                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2287                     },
2288                     abi: Abi::Uninhabited,
2289                     largest_niche: None,
2290                     align: tcx.data_layout.i8_align,
2291                     size: Size::ZERO,
2292                 })
2293             }
2294
2295             Variants::Multiple { ref variants, .. } => variants[variant_index],
2296         };
2297
2298         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2299
2300         TyAndLayout { ty: this.ty, layout }
2301     }
2302
2303     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2304         enum TyMaybeWithLayout<'tcx> {
2305             Ty(Ty<'tcx>),
2306             TyAndLayout(TyAndLayout<'tcx>),
2307         }
2308
2309         fn field_ty_or_layout<'tcx>(
2310             this: TyAndLayout<'tcx>,
2311             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2312             i: usize,
2313         ) -> TyMaybeWithLayout<'tcx> {
2314             let tcx = cx.tcx();
2315             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2316                 TyAndLayout {
2317                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2318                     ty: tag.primitive().to_ty(tcx),
2319                 }
2320             };
2321
2322             match *this.ty.kind() {
2323                 ty::Bool
2324                 | ty::Char
2325                 | ty::Int(_)
2326                 | ty::Uint(_)
2327                 | ty::Float(_)
2328                 | ty::FnPtr(_)
2329                 | ty::Never
2330                 | ty::FnDef(..)
2331                 | ty::GeneratorWitness(..)
2332                 | ty::Foreign(..)
2333                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2334
2335                 // Potentially-fat pointers.
2336                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2337                     assert!(i < this.fields.count());
2338
2339                     // Reuse the fat `*T` type as its own thin pointer data field.
2340                     // This provides information about, e.g., DST struct pointees
2341                     // (which may have no non-DST form), and will work as long
2342                     // as the `Abi` or `FieldsShape` is checked by users.
2343                     if i == 0 {
2344                         let nil = tcx.mk_unit();
2345                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2346                             tcx.mk_mut_ptr(nil)
2347                         } else {
2348                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2349                         };
2350
2351                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2352                         // the `Result` should always work because the type is
2353                         // always either `*mut ()` or `&'static mut ()`.
2354                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2355                             ty: this.ty,
2356                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2357                         });
2358                     }
2359
2360                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2361                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2362                         ty::Dynamic(_, _) => {
2363                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2364                                 tcx.lifetimes.re_static,
2365                                 tcx.mk_array(tcx.types.usize, 3),
2366                             ))
2367                             /* FIXME: use actual fn pointers
2368                             Warning: naively computing the number of entries in the
2369                             vtable by counting the methods on the trait + methods on
2370                             all parent traits does not work, because some methods can
2371                             be not object safe and thus excluded from the vtable.
2372                             Increase this counter if you tried to implement this but
2373                             failed to do it without duplicating a lot of code from
2374                             other places in the compiler: 2
2375                             tcx.mk_tup(&[
2376                                 tcx.mk_array(tcx.types.usize, 3),
2377                                 tcx.mk_array(Option<fn()>),
2378                             ])
2379                             */
2380                         }
2381                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2382                     }
2383                 }
2384
2385                 // Arrays and slices.
2386                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2387                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2388
2389                 // Tuples, generators and closures.
2390                 ty::Closure(_, ref substs) => field_ty_or_layout(
2391                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2392                     cx,
2393                     i,
2394                 ),
2395
2396                 ty::Generator(def_id, ref substs, _) => match this.variants {
2397                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2398                         substs
2399                             .as_generator()
2400                             .state_tys(def_id, tcx)
2401                             .nth(index.as_usize())
2402                             .unwrap()
2403                             .nth(i)
2404                             .unwrap(),
2405                     ),
2406                     Variants::Multiple { tag, tag_field, .. } => {
2407                         if i == tag_field {
2408                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2409                         }
2410                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2411                     }
2412                 },
2413
2414                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2415
2416                 // ADTs.
2417                 ty::Adt(def, substs) => {
2418                     match this.variants {
2419                         Variants::Single { index } => {
2420                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2421                         }
2422
2423                         // Discriminant field for enums (where applicable).
2424                         Variants::Multiple { tag, .. } => {
2425                             assert_eq!(i, 0);
2426                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2427                         }
2428                     }
2429                 }
2430
2431                 ty::Projection(_)
2432                 | ty::Bound(..)
2433                 | ty::Placeholder(..)
2434                 | ty::Opaque(..)
2435                 | ty::Param(_)
2436                 | ty::Infer(_)
2437                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2438             }
2439         }
2440
2441         match field_ty_or_layout(this, cx, i) {
2442             TyMaybeWithLayout::Ty(field_ty) => {
2443                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2444                     bug!(
2445                         "failed to get layout for `{}`: {},\n\
2446                          despite it being a field (#{}) of an existing layout: {:#?}",
2447                         field_ty,
2448                         e,
2449                         i,
2450                         this
2451                     )
2452                 })
2453             }
2454             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2455         }
2456     }
2457
2458     fn ty_and_layout_pointee_info_at(
2459         this: TyAndLayout<'tcx>,
2460         cx: &C,
2461         offset: Size,
2462     ) -> Option<PointeeInfo> {
2463         let tcx = cx.tcx();
2464         let param_env = cx.param_env();
2465
2466         let addr_space_of_ty = |ty: Ty<'tcx>| {
2467             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2468         };
2469
2470         let pointee_info = match *this.ty.kind() {
2471             ty::RawPtr(mt) if offset.bytes() == 0 => {
2472                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2473                     size: layout.size,
2474                     align: layout.align.abi,
2475                     safe: None,
2476                     address_space: addr_space_of_ty(mt.ty),
2477                 })
2478             }
2479             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2480                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2481                     size: layout.size,
2482                     align: layout.align.abi,
2483                     safe: None,
2484                     address_space: cx.data_layout().instruction_address_space,
2485                 })
2486             }
2487             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2488                 let address_space = addr_space_of_ty(ty);
2489                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2490                     // Use conservative pointer kind if not optimizing. This saves us the
2491                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2492                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2493                     PointerKind::Shared
2494                 } else {
2495                     match mt {
2496                         hir::Mutability::Not => {
2497                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2498                                 PointerKind::Frozen
2499                             } else {
2500                                 PointerKind::Shared
2501                             }
2502                         }
2503                         hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained that
                            // is not based on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
2508                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2509                                 PointerKind::UniqueBorrowed
2510                             } else {
2511                                 PointerKind::Shared
2512                             }
2513                         }
2514                     }
2515                 };
2516
2517                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2518                     size: layout.size,
2519                     align: layout.align.abi,
2520                     safe: Some(kind),
2521                     address_space,
2522                 })
2523             }
2524
2525             _ => {
2526                 let mut data_variant = match this.variants {
2527                     // Within the discriminant field, only the niche itself is
2528                     // always initialized, so we only check for a pointer at its
2529                     // offset.
2530                     //
2531                     // If the niche is a pointer, it's either valid (according
2532                     // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
2534                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2535                     // this will continue to work as long as we don't start
2536                     // using more niches than just null (e.g., the first page of
2537                     // the address space, or unaligned pointers).
2538                     Variants::Multiple {
2539                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2540                         tag_field,
2541                         ..
2542                     } if this.fields.offset(tag_field) == offset => {
2543                         Some(this.for_variant(cx, dataful_variant))
2544                     }
2545                     _ => Some(this),
2546                 };
2547
2548                 if let Some(variant) = data_variant {
2549                     // We're not interested in any unions.
2550                     if let FieldsShape::Union(_) = variant.fields {
2551                         data_variant = None;
2552                     }
2553                 }
2554
2555                 let mut result = None;
2556
2557                 if let Some(variant) = data_variant {
2558                     let ptr_end = offset + Pointer.size(cx);
2559                     for i in 0..variant.fields.count() {
2560                         let field_start = variant.fields.offset(i);
2561                         if field_start <= offset {
2562                             let field = variant.field(cx, i);
2563                             result = field.to_result().ok().and_then(|field| {
2564                                 if ptr_end <= field_start + field.size {
2565                                     // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
2569                                 } else {
2570                                     None
2571                                 }
2572                             });
2573                             if result.is_some() {
2574                                 break;
2575                             }
2576                         }
2577                     }
2578                 }
2579
2580                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2581                 if let Some(ref mut pointee) = result {
2582                     if let ty::Adt(def, _) = this.ty.kind() {
2583                         if def.is_box() && offset.bytes() == 0 {
2584                             pointee.safe = Some(PointerKind::UniqueOwned);
2585                         }
2586                     }
2587                 }
2588
2589                 result
2590             }
2591         };
2592
2593         debug!(
2594             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2595             offset,
2596             this.ty.kind(),
2597             pointee_info
2598         );
2599
2600         pointee_info
2601     }
2602 }
2603
2604 impl<'tcx> ty::Instance<'tcx> {
2605     // NOTE(eddyb) this is private to avoid using it from outside of
2606     // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead)
    // or should go through `FnAbi`, to avoid losing any
2609     // adjustments `fn_abi_of_instance` might be performing.
2610     fn fn_sig_for_fn_abi(
2611         &self,
2612         tcx: TyCtxt<'tcx>,
2613         param_env: ty::ParamEnv<'tcx>,
2614     ) -> ty::PolyFnSig<'tcx> {
2615         let ty = self.ty(tcx, param_env);
2616         match *ty.kind() {
2617             ty::FnDef(..) => {
2618                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2619                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2620                 // (i.e. due to being inside a projection that got normalized, see
2621                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2622                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2623                 let mut sig = match *ty.kind() {
2624                     ty::FnDef(def_id, substs) => tcx
2625                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2626                         .subst(tcx, substs),
2627                     _ => unreachable!(),
2628                 };
2629
2630                 if let ty::InstanceDef::VtableShim(..) = self.def {
2631                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2632                     sig = sig.map_bound(|mut sig| {
2633                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2634                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2635                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2636                         sig
2637                     });
2638                 }
2639                 sig
2640             }
2641             ty::Closure(def_id, substs) => {
2642                 let sig = substs.as_closure().sig();
2643
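                // Editorial sketch: a closure `|x: u32| -> bool` has
                // `sig = fn((u32,)) -> bool` (arguments already tupled). Below we
                // prepend the late-bound environment `&'env F` (or `&'env mut F`
                // / `F`, per capture kind, via `closure_env_ty`), yielding
                // `fn(&'env F, (u32,)) -> bool` with the "rust-call" ABI
                // carried over from `sig.abi`.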
2644                 let bound_vars = tcx.mk_bound_variable_kinds(
2645                     sig.bound_vars()
2646                         .iter()
2647                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2648                 );
2649                 let br = ty::BoundRegion {
2650                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2651                     kind: ty::BoundRegionKind::BrEnv,
2652                 };
2653                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2654                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2655
2656                 let sig = sig.skip_binder();
2657                 ty::Binder::bind_with_vars(
2658                     tcx.mk_fn_sig(
2659                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2660                         sig.output(),
2661                         sig.c_variadic,
2662                         sig.unsafety,
2663                         sig.abi,
2664                     ),
2665                     bound_vars,
2666                 )
2667             }
2668             ty::Generator(_, substs, _) => {
2669                 let sig = substs.as_generator().poly_sig();
2670
2671                 let bound_vars = tcx.mk_bound_variable_kinds(
2672                     sig.bound_vars()
2673                         .iter()
2674                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2675                 );
2676                 let br = ty::BoundRegion {
2677                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2678                     kind: ty::BoundRegionKind::BrEnv,
2679                 };
2680                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2681                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2682
2683                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2684                 let pin_adt_ref = tcx.adt_def(pin_did);
2685                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2686                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2687
2688                 let sig = sig.skip_binder();
2689                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2690                 let state_adt_ref = tcx.adt_def(state_did);
2691                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2692                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
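                // Editorial note: the resulting signature is effectively
                // `fn(Pin<&'env mut G>, Resume) -> GeneratorState<Yield, Return>`,
                // matching the shape of `Generator::resume`.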
2693                 ty::Binder::bind_with_vars(
2694                     tcx.mk_fn_sig(
2695                         [env_ty, sig.resume_ty].iter(),
2696                         &ret_ty,
2697                         false,
2698                         hir::Unsafety::Normal,
2699                         rustc_target::spec::abi::Abi::Rust,
2700                     ),
2701                     bound_vars,
2702                 )
2703             }
2704             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2705         }
2706     }
2707 }
2708
2709 /// Calculates whether a function's ABI can unwind or not.
2710 ///
2711 /// This takes two primary parameters:
2712 ///
2713 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2714 ///   codegen attrs for a defined function. For function pointers this set of
2715 ///   flags is the empty set. This is only applicable for Rust-defined
2716 ///   functions, and generally isn't needed except for small optimizations where
2717 ///   we try to say a function which otherwise might look like it could unwind
2718 ///   doesn't actually unwind (such as for intrinsics and such).
2719 ///
2720 /// * `abi` - this is the ABI that the function is defined with. This is the
2721 ///   primary factor for determining whether a function can unwind or not.
2722 ///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). What counts as unwinding is
/// defined for each ABI individually, but it always corresponds to some form of
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
2731 ///
/// Rust functions are classified as able to unwind or not based on the
/// active "panic strategy". In other words, Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and unable to unwind in `-Cpanic=abort` mode.
2735 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2736 /// only if the final panic mode is panic=abort. In this scenario any code
2737 /// previously compiled assuming that a function can unwind is still correct, it
2738 /// just never happens to actually unwind at runtime.
2739 ///
2740 /// This function's answer to whether or not a function can unwind is quite
2741 /// impactful throughout the compiler. This affects things like:
2742 ///
2743 /// * Calling a function which can't unwind means codegen simply ignores any
2744 ///   associated unwinding cleanup.
2745 /// * Calling a function which can unwind from a function which can't unwind
2746 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2747 ///   aborts the process.
2748 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2749 ///   affects various optimizations and codegen.
2750 ///
2751 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2752 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2753 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2754 /// might (from a foreign exception or similar).
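///
/// For illustration (editorial summary derived from the `match` below, with
/// the `c_unwind` feature gate inactive and `NEVER_UNWIND` unset):
///
/// ```text
/// ABI            -Cpanic=unwind    -Cpanic=abort
/// "Rust"         can unwind        nounwind
/// "C"            can unwind        nounwind
/// "C-unwind"     can unwind        can unwind
/// "ptx-kernel"   nounwind          nounwind
/// ```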
2755 #[inline]
2756 pub fn fn_can_unwind<'tcx>(
2757     tcx: TyCtxt<'tcx>,
2758     codegen_fn_attr_flags: CodegenFnAttrFlags,
2759     abi: SpecAbi,
2760 ) -> bool {
2761     // Special attribute for functions which can't unwind.
2762     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2763         return false;
2764     }
2765
    // Otherwise, if this isn't special, then unwinding is generally determined
    // by the ABI of the function itself. ABIs like `C` have variants which also
2768     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2769     // ABIs have such an option. Otherwise the only other thing here is Rust
2770     // itself, and those ABIs are determined by the panic strategy configured
2771     // for this compilation.
2772     //
2773     // Unfortunately at this time there's also another caveat. Rust [RFC
2774     // 2945][rfc] has been accepted and is in the process of being implemented
2775     // and stabilized. In this interim state we need to deal with historical
2776     // rustc behavior as well as plan for future rustc behavior.
2777     //
2778     // Historically functions declared with `extern "C"` were marked at the
2779     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2780     // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared and Rust-defined functions.
2783     //
2784     // To fix this UB rustc would like to change in the future to catch unwinds
2785     // from function calls that may unwind within a Rust-defined `extern "C"`
2786     // function and forcibly abort the process, thereby respecting the
2787     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2788     // ready to roll out, so determining whether or not the `C` family of ABIs
2789     // unwinds is conditional not only on their definition but also whether the
2790     // `#![feature(c_unwind)]` feature gate is active.
2791     //
    // Note that this means that, unlike historical compilers, rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
2794     // prevent some optimization opportunities, however, so we try to scope this
2795     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2796     // to `panic=abort`).
2797     //
2798     // Eventually the check against `c_unwind` here will ideally get removed and
2799     // this'll be a little cleaner as it'll be a straightforward check of the
2800     // ABI.
2801     //
2802     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2803     use SpecAbi::*;
2804     match abi {
2805         C { unwind }
2806         | System { unwind }
2807         | Cdecl { unwind }
2808         | Stdcall { unwind }
2809         | Fastcall { unwind }
2810         | Vectorcall { unwind }
2811         | Thiscall { unwind }
2812         | Aapcs { unwind }
2813         | Win64 { unwind }
2814         | SysV64 { unwind } => {
2815             unwind
2816                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2817         }
2818         PtxKernel
2819         | Msp430Interrupt
2820         | X86Interrupt
2821         | AmdGpuKernel
2822         | EfiApi
2823         | AvrInterrupt
2824         | AvrNonBlockingInterrupt
2825         | CCmseNonSecureCall
2826         | Wasm
2827         | RustIntrinsic
2828         | PlatformIntrinsic
2829         | Unadjusted => false,
2830         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2831     }
2832 }
2833
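/// Maps a Rust-facing ABI (`SpecAbi`) to the backend calling convention
/// (`Conv`); e.g. `extern "stdcall"` becomes `Conv::X86Stdcall`, while all of
/// the Rust-family ABIs share `Conv::Rust`.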
2834 #[inline]
2835 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2836     use rustc_target::spec::abi::Abi::*;
2837     match tcx.sess.target.adjust_abi(abi) {
2838         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2839
2840         // It's the ABI's job to select this, not ours.
2841         System { .. } => bug!("system abi should be selected elsewhere"),
2842         EfiApi => bug!("eficall abi should be selected elsewhere"),
2843
2844         Stdcall { .. } => Conv::X86Stdcall,
2845         Fastcall { .. } => Conv::X86Fastcall,
2846         Vectorcall { .. } => Conv::X86VectorCall,
2847         Thiscall { .. } => Conv::X86ThisCall,
2848         C { .. } => Conv::C,
2849         Unadjusted => Conv::C,
2850         Win64 { .. } => Conv::X86_64Win64,
2851         SysV64 { .. } => Conv::X86_64SysV,
2852         Aapcs { .. } => Conv::ArmAapcs,
2853         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2854         PtxKernel => Conv::PtxKernel,
2855         Msp430Interrupt => Conv::Msp430Intr,
2856         X86Interrupt => Conv::X86Intr,
2857         AmdGpuKernel => Conv::AmdGpuKernel,
2858         AvrInterrupt => Conv::AvrInterrupt,
2859         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2860         Wasm => Conv::C,
2861
        // These ABI constants ought to be more specific...
2863         Cdecl { .. } => Conv::C,
2864     }
2865 }
2866
2867 /// Error produced by attempting to compute or adjust a `FnAbi`.
2868 #[derive(Copy, Clone, Debug, HashStable)]
2869 pub enum FnAbiError<'tcx> {
2870     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2871     Layout(LayoutError<'tcx>),
2872
2873     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2874     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2875 }
2876
2877 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2878     fn from(err: LayoutError<'tcx>) -> Self {
2879         Self::Layout(err)
2880     }
2881 }
2882
2883 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2884     fn from(err: call::AdjustForForeignAbiError) -> Self {
2885         Self::AdjustForForeignAbi(err)
2886     }
2887 }
2888
2889 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2890     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2891         match self {
2892             Self::Layout(err) => err.fmt(f),
2893             Self::AdjustForForeignAbi(err) => err.fmt(f),
2894         }
2895     }
2896 }
2897
// FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
2899 // just for error handling.
2900 #[derive(Debug)]
2901 pub enum FnAbiRequest<'tcx> {
2902     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2903     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2904 }
2905
2906 /// Trait for contexts that want to be able to compute `FnAbi`s.
2907 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2908 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2909     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2910     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2911     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2912
2913     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2914     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2915     ///
2916     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2917     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2918     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2919     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2920     fn handle_fn_abi_err(
2921         &self,
2922         err: FnAbiError<'tcx>,
2923         span: Span,
2924         fn_abi_request: FnAbiRequest<'tcx>,
2925     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2926 }
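
// For example (editorial sketch; `MyCodegenCx` is hypothetical, not compiler
// code), an infallible codegen-like context can make `fn_abi_of_*` return bare
// `&FnAbi` references by turning errors into ICEs:
//
//     impl<'tcx> FnAbiOfHelpers<'tcx> for MyCodegenCx<'tcx> {
//         type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
//
//         fn handle_fn_abi_err(
//             &self,
//             err: FnAbiError<'tcx>,
//             span: Span,
//             fn_abi_request: FnAbiRequest<'tcx>,
//         ) -> ! {
//             span_bug!(span, "{:?}: {}", fn_abi_request, err)
//         }
//     }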
2927
2928 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2929 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2930     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2931     ///
2932     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2933     /// instead, where the instance is an `InstanceDef::Virtual`.
2934     #[inline]
2935     fn fn_abi_of_fn_ptr(
2936         &self,
2937         sig: ty::PolyFnSig<'tcx>,
2938         extra_args: &'tcx ty::List<Ty<'tcx>>,
2939     ) -> Self::FnAbiOfResult {
2940         // FIXME(eddyb) get a better `span` here.
2941         let span = self.layout_tcx_at_span();
2942         let tcx = self.tcx().at(span);
2943
2944         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2945             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2946         ))
2947     }
2948
2949     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2950     /// direct calls to an `fn`.
2951     ///
2952     /// NB: that includes virtual calls, which are represented by "direct calls"
2953     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2954     #[inline]
2955     fn fn_abi_of_instance(
2956         &self,
2957         instance: ty::Instance<'tcx>,
2958         extra_args: &'tcx ty::List<Ty<'tcx>>,
2959     ) -> Self::FnAbiOfResult {
2960         // FIXME(eddyb) get a better `span` here.
2961         let span = self.layout_tcx_at_span();
2962         let tcx = self.tcx().at(span);
2963
2964         MaybeResult::from(
2965             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2966                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2967                 // we can get some kind of span even if one wasn't provided.
2968                 // However, we don't do this early in order to avoid calling
2969                 // `def_span` unconditionally (which may have a perf penalty).
2970                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2971                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2972             }),
2973         )
2974     }
2975 }
2976
2977 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
2978
2979 fn fn_abi_of_fn_ptr<'tcx>(
2980     tcx: TyCtxt<'tcx>,
2981     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2982 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2983     let (param_env, (sig, extra_args)) = query.into_parts();
2984
2985     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2986         sig,
2987         extra_args,
2988         None,
2989         CodegenFnAttrFlags::empty(),
2990         false,
2991     )
2992 }
2993
2994 fn fn_abi_of_instance<'tcx>(
2995     tcx: TyCtxt<'tcx>,
2996     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2997 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2998     let (param_env, (instance, extra_args)) = query.into_parts();
2999
3000     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3001
3002     let caller_location = if instance.def.requires_caller_location(tcx) {
3003         Some(tcx.caller_location_ty())
3004     } else {
3005         None
3006     };
3007
3008     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
3009
3010     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3011         sig,
3012         extra_args,
3013         caller_location,
3014         attrs,
3015         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3016     )
3017 }
3018
3019 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3020     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method into a separate `struct`.
3022     fn fn_abi_new_uncached(
3023         &self,
3024         sig: ty::PolyFnSig<'tcx>,
3025         extra_args: &[Ty<'tcx>],
3026         caller_location: Option<Ty<'tcx>>,
3027         codegen_fn_attr_flags: CodegenFnAttrFlags,
3028         // FIXME(eddyb) replace this with something typed, like an `enum`.
3029         force_thin_self_ptr: bool,
3030     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3031         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3032
3033         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3034
3035         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3036
3037         let mut inputs = sig.inputs();
3038         let extra_args = if sig.abi == RustCall {
3039             assert!(!sig.c_variadic && extra_args.is_empty());
3040
3041             if let Some(input) = sig.inputs().last() {
3042                 if let ty::Tuple(tupled_arguments) = input.kind() {
3043                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3044                     tupled_arguments
3045                 } else {
3046                     bug!(
3047                         "argument to function with \"rust-call\" ABI \
3048                             is not a tuple"
3049                     );
3050                 }
3051             } else {
                bug!("function with \"rust-call\" ABI has no arguments");
3056             }
3057         } else {
3058             assert!(sig.c_variadic || extra_args.is_empty());
3059             extra_args
3060         };
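        // Editorial example: for `extern "rust-call" fn(&F, (u32, bool)) -> R`,
        // `inputs` becomes `[&F]` and the trailing tuple is flattened into
        // `extra_args = [u32, bool]`, so the final `FnAbi` sees three arguments.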
3061
3062         let target = &self.tcx.sess.target;
3063         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3064         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3065         let linux_s390x_gnu_like =
3066             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3067         let linux_sparc64_gnu_like =
3068             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3069         let linux_powerpc_gnu_like =
3070             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3071         use SpecAbi::*;
3072         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3073
3074         // Handle safe Rust thin and fat pointers.
3075         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3076                                       scalar: Scalar,
3077                                       layout: TyAndLayout<'tcx>,
3078                                       offset: Size,
3079                                       is_return: bool| {
3080             // Booleans are always a noundef i1 that needs to be zero-extended.
3081             if scalar.is_bool() {
3082                 attrs.ext(ArgExtension::Zext);
3083                 attrs.set(ArgAttribute::NoUndef);
3084                 return;
3085             }
3086
3087             // Scalars which have invalid values cannot be undef.
3088             if !scalar.is_always_valid(self) {
3089                 attrs.set(ArgAttribute::NoUndef);
3090             }
3091
            // Only pointer types are handled below.
            let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3094
3095             if !valid_range.contains(0) {
3096                 attrs.set(ArgAttribute::NonNull);
3097             }
3098
3099             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3100                 if let Some(kind) = pointee.safe {
3101                     attrs.pointee_align = Some(pointee.align);
3102
                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function, as they can be deallocated
                    // at any time. Set their valid size to 0.
3106                     attrs.pointee_size = match kind {
3107                         PointerKind::UniqueOwned => Size::ZERO,
3108                         _ => pointee.size,
3109                     };
3110
3111                     // `Box`, `&T`, and `&mut T` cannot be undef.
3112                     // Note that this only applies to the value of the pointer itself;
3113                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3114                     attrs.set(ArgAttribute::NoUndef);
3115
                    // `Box` pointer parameters never alias because ownership is
                    // transferred. `&mut` pointer parameters never alias other
                    // parameters or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
3124                     //
3125                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3126                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3127                     // or not to actually emit the attribute. It can also be controlled with the
3128                     // `-Zmutable-noalias` debugging option.
3129                     let no_alias = match kind {
3130                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3131                         PointerKind::UniqueOwned => true,
3132                         PointerKind::Frozen => !is_return,
3133                     };
3134                     if no_alias {
3135                         attrs.set(ArgAttribute::NoAlias);
3136                     }
3137
3138                     if kind == PointerKind::Frozen && !is_return {
3139                         attrs.set(ArgAttribute::ReadOnly);
3140                     }
3141
3142                     if kind == PointerKind::UniqueBorrowed && !is_return {
3143                         attrs.set(ArgAttribute::NoAliasMutRef);
3144                     }
3145                 }
3146             }
3147         };
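        // To summarize the rules above for a non-return argument (editorial
        // note; all of these additionally get `nonnull`, via the valid-range
        // check, and `noundef`):
        //
        //     `&T` (`Frozen`)             -> `noalias` + `readonly`
        //     `&T` w/ interior mutability -> (`Shared`) neither
        //     `&mut T` (`UniqueBorrowed`) -> `NoAliasMutRef` (backend's choice)
        //     `Box<T>` (`UniqueOwned`)    -> `noalias`, `pointee_size = 0`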
3148
3149         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3150             let is_return = arg_idx.is_none();
3151
3152             let layout = self.layout_of(ty)?;
3153             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable; it's not an argument of the virtual fn.
3155                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3156                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3157                 make_thin_self_ptr(self, layout)
3158             } else {
3159                 layout
3160             };
3161
3162             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3163                 let mut attrs = ArgAttributes::new();
3164                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3165                 attrs
3166             });
3167
3168             if arg.layout.is_zst() {
3169                 // For some forsaken reason, x86_64-pc-windows-gnu
3170                 // doesn't ignore zero-sized struct arguments.
3171                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3172                 if is_return
3173                     || rust_abi
3174                     || (!win_x64_gnu
3175                         && !linux_s390x_gnu_like
3176                         && !linux_sparc64_gnu_like
3177                         && !linux_powerpc_gnu_like)
3178                 {
3179                     arg.mode = PassMode::Ignore;
3180                 }
3181             }
3182
3183             Ok(arg)
3184         };
3185
3186         let mut fn_abi = FnAbi {
3187             ret: arg_of(sig.output(), None)?,
3188             args: inputs
3189                 .iter()
3190                 .copied()
3191                 .chain(extra_args.iter().copied())
3192                 .chain(caller_location)
3193                 .enumerate()
3194                 .map(|(i, ty)| arg_of(ty, Some(i)))
3195                 .collect::<Result<_, _>>()?,
3196             c_variadic: sig.c_variadic,
3197             fixed_count: inputs.len(),
3198             conv,
3199             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3200         };
3201         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3202         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3203         Ok(self.tcx.arena.alloc(fn_abi))
3204     }
3205
3206     fn fn_abi_adjust_for_abi(
3207         &self,
3208         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3209         abi: SpecAbi,
3210     ) -> Result<(), FnAbiError<'tcx>> {
3211         if abi == SpecAbi::Unadjusted {
3212             return Ok(());
3213         }
3214
3215         if abi == SpecAbi::Rust
3216             || abi == SpecAbi::RustCall
3217             || abi == SpecAbi::RustIntrinsic
3218             || abi == SpecAbi::PlatformIntrinsic
3219         {
3220             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3221                 if arg.is_ignore() {
3222                     return;
3223                 }
3224
3225                 match arg.layout.abi {
3226                     Abi::Aggregate { .. } => {}
3227
3228                     // This is a fun case! The gist of what this is doing is
3229                     // that we want callers and callees to always agree on the
3230                     // ABI of how they pass SIMD arguments. If we were to *not*
3231                     // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
3233                     // appropriate ABI is for the callee and the caller. That
3234                     // means, for example, if the caller doesn't have AVX
3235                     // enabled but the callee does, then passing an AVX argument
3236                     // across this boundary would cause corrupt data to show up.
3237                     //
3238                     // This problem is fixed by unconditionally passing SIMD
3239                     // arguments through memory between callers and callees
3240                     // which should get them all to agree on ABI regardless of
3241                     // target feature sets. Some more information about this
3242                     // issue can be found in #44367.
3243                     //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
3247                     Abi::Vector { .. }
3248                         if abi != SpecAbi::PlatformIntrinsic
3249                             && self.tcx.sess.target.simd_types_indirect =>
3250                     {
3251                         arg.make_indirect();
3252                         return;
3253                     }
3254
3255                     _ => return,
3256                 }
3257
3258                 let size = arg.layout.size;
3259                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3260                     arg.make_indirect();
3261                 } else {
3262                     // We want to pass small aggregates as immediates, but using
3263                     // a LLVM aggregate type for this leads to bad optimizations,
3264                     // so we pick an appropriately sized integer type instead.
3265                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3266                 }
3267             };
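            // Editorial example: on a 64-bit target, a 4-byte
            // `struct Pair(u16, u16)` is cast to
            // `Reg { kind: RegKind::Integer, size: 4 bytes }` and passed as a
            // single integer immediate, while a 16-byte aggregate exceeds
            // `Pointer.size` and is passed indirectly.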
3268             fixup(&mut fn_abi.ret);
3269             for arg in &mut fn_abi.args {
3270                 fixup(arg);
3271             }
3272         } else {
3273             fn_abi.adjust_for_foreign_abi(self, abi)?;
3274         }
3275
3276         Ok(())
3277     }
3278 }
3279
3280 fn make_thin_self_ptr<'tcx>(
3281     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3282     layout: TyAndLayout<'tcx>,
3283 ) -> TyAndLayout<'tcx> {
3284     let tcx = cx.tcx();
3285     let fat_pointer_ty = if layout.is_unsized() {
3286         // unsized `self` is passed as a pointer to `self`
3287         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3288         tcx.mk_mut_ptr(layout.ty)
3289     } else {
3290         match layout.abi {
3291             Abi::ScalarPair(..) => (),
3292             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3293         }
3294
3295         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3296         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3297         // elsewhere in the compiler as a method on a `dyn Trait`.
3298         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type.
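        // Editorial sketch: for `self: Rc<dyn Trait>`, the loop descends
        // roughly `Rc<dyn Trait>` -> `NonNull<RcBox<dyn Trait>>` ->
        // `*const RcBox<dyn Trait>` (the exact chain is a `std` implementation
        // detail), at which point `is_unsafe_ptr()` is true and the loop stops.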
3300         let mut fat_pointer_layout = layout;
3301         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3302             && !fat_pointer_layout.ty.is_region_ptr()
3303         {
3304             for i in 0..fat_pointer_layout.fields.count() {
3305                 let field_layout = fat_pointer_layout.field(cx, i);
3306
3307                 if !field_layout.is_zst() {
3308                     fat_pointer_layout = field_layout;
3309                     continue 'descend_newtypes;
3310                 }
3311             }
3312
3313             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3314         }
3315
3316         fat_pointer_layout.ty
3317     };
3318
    // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to
    // that of `*mut ()`, a thin pointer, but keep the same type; this is
    // understood as a special case elsewhere in the compiler.
3322     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3323
3324     TyAndLayout {
3325         ty: fat_pointer_ty,
3326
3327         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3328         // should always work because the type is always `*mut ()`.
3329         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3330     }
3331 }