]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_middle/src/ty/layout.rs
Auto merge of #95170 - jyn514:ci-llvm, r=Mark-Simulacrum
[rust.git] / compiler / rustc_middle / src / ty / layout.rs
1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
6 use rustc_ast as ast;
7 use rustc_attr as attr;
8 use rustc_hir as hir;
9 use rustc_hir::lang_items::LangItem;
10 use rustc_index::bit_set::BitSet;
11 use rustc_index::vec::{Idx, IndexVec};
12 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
13 use rustc_span::symbol::Symbol;
14 use rustc_span::{Span, DUMMY_SP};
15 use rustc_target::abi::call::{
16     ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
17 };
18 use rustc_target::abi::*;
19 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
20
21 use std::cmp;
22 use std::fmt;
23 use std::iter;
24 use std::num::NonZeroUsize;
25 use std::ops::Bound;
26
27 use rand::{seq::SliceRandom, SeedableRng};
28 use rand_xoshiro::Xoshiro128StarStar;
29
30 pub fn provide(providers: &mut ty::query::Providers) {
31     *providers =
32         ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
33 }
34
35 pub trait IntegerExt {
36     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
37     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
38     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
39     fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
40     fn repr_discr<'tcx>(
41         tcx: TyCtxt<'tcx>,
42         ty: Ty<'tcx>,
43         repr: &ReprOptions,
44         min: i128,
45         max: i128,
46     ) -> (Integer, bool);
47 }
48
49 impl IntegerExt for Integer {
50     #[inline]
51     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
52         match (*self, signed) {
53             (I8, false) => tcx.types.u8,
54             (I16, false) => tcx.types.u16,
55             (I32, false) => tcx.types.u32,
56             (I64, false) => tcx.types.u64,
57             (I128, false) => tcx.types.u128,
58             (I8, true) => tcx.types.i8,
59             (I16, true) => tcx.types.i16,
60             (I32, true) => tcx.types.i32,
61             (I64, true) => tcx.types.i64,
62             (I128, true) => tcx.types.i128,
63         }
64     }
65
66     /// Gets the Integer type from an attr::IntType.
67     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
68         let dl = cx.data_layout();
69
70         match ity {
71             attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
72             attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
73             attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
74             attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
75             attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
76             attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
77                 dl.ptr_sized_integer()
78             }
79         }
80     }
81
82     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
83         match ity {
84             ty::IntTy::I8 => I8,
85             ty::IntTy::I16 => I16,
86             ty::IntTy::I32 => I32,
87             ty::IntTy::I64 => I64,
88             ty::IntTy::I128 => I128,
89             ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
90         }
91     }
92     fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
93         match ity {
94             ty::UintTy::U8 => I8,
95             ty::UintTy::U16 => I16,
96             ty::UintTy::U32 => I32,
97             ty::UintTy::U64 => I64,
98             ty::UintTy::U128 => I128,
99             ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
100         }
101     }
102
103     /// Finds the appropriate Integer type and signedness for the given
104     /// signed discriminant range and `#[repr]` attribute.
105     /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
106     /// that shouldn't affect anything, other than maybe debuginfo.
107     fn repr_discr<'tcx>(
108         tcx: TyCtxt<'tcx>,
109         ty: Ty<'tcx>,
110         repr: &ReprOptions,
111         min: i128,
112         max: i128,
113     ) -> (Integer, bool) {
114         // Theoretically, negative values could be larger in unsigned representation
115         // than the unsigned representation of the signed minimum. However, if there
116         // are any negative values, the only valid unsigned representation is u128
117         // which can fit all i128 values, so the result remains unaffected.
118         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
119         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
120
121         if let Some(ity) = repr.int {
122             let discr = Integer::from_attr(&tcx, ity);
123             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
124             if discr < fit {
125                 bug!(
126                     "Integer::repr_discr: `#[repr]` hint too small for \
127                       discriminant range of enum `{}",
128                     ty
129                 )
130             }
131             return (discr, ity.is_signed());
132         }
133
134         let at_least = if repr.c() {
135             // This is usually I32, however it can be different on some platforms,
136             // notably hexagon and arm-none/thumb-none
137             tcx.data_layout().c_enum_min_size
138         } else {
139             // repr(Rust) enums try to be as small as possible
140             I8
141         };
142
143         // If there are no negative values, we can use the unsigned fit.
144         if min >= 0 {
145             (cmp::max(unsigned_fit, at_least), false)
146         } else {
147             (cmp::max(signed_fit, at_least), true)
148         }
149     }
150 }
151
152 pub trait PrimitiveExt {
153     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
154     fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
155 }
156
157 impl PrimitiveExt for Primitive {
158     #[inline]
159     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
160         match *self {
161             Int(i, signed) => i.to_ty(tcx, signed),
162             F32 => tcx.types.f32,
163             F64 => tcx.types.f64,
164             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
165         }
166     }
167
168     /// Return an *integer* type matching this primitive.
169     /// Useful in particular when dealing with enum discriminants.
170     #[inline]
171     fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
172         match *self {
173             Int(i, signed) => i.to_ty(tcx, signed),
174             Pointer => tcx.types.usize,
175             F32 | F64 => bug!("floats do not have an int type"),
176         }
177     }
178 }
179
180 /// The first half of a fat pointer.
181 ///
182 /// - For a trait object, this is the address of the box.
183 /// - For a slice, this is the base address.
184 pub const FAT_PTR_ADDR: usize = 0;
185
186 /// The second half of a fat pointer.
187 ///
188 /// - For a trait object, this is the address of the vtable.
189 /// - For a slice, this is the length.
190 pub const FAT_PTR_EXTRA: usize = 1;
191
192 /// The maximum supported number of lanes in a SIMD vector.
193 ///
194 /// This value is selected based on backend support:
195 /// * LLVM does not appear to have a vector width limit.
196 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
197 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
198
199 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
200 pub enum LayoutError<'tcx> {
201     Unknown(Ty<'tcx>),
202     SizeOverflow(Ty<'tcx>),
203     NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
204 }
205
206 impl<'tcx> fmt::Display for LayoutError<'tcx> {
207     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
208         match *self {
209             LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
210             LayoutError::SizeOverflow(ty) => {
211                 write!(f, "values of the type `{}` are too big for the current architecture", ty)
212             }
213             LayoutError::NormalizationFailure(t, e) => write!(
214                 f,
215                 "unable to determine layout for `{}` because `{}` cannot be normalized",
216                 t,
217                 e.get_type_for_failure()
218             ),
219         }
220     }
221 }
222
223 #[instrument(skip(tcx, query), level = "debug")]
224 fn layout_of<'tcx>(
225     tcx: TyCtxt<'tcx>,
226     query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
227 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
228     ty::tls::with_related_context(tcx, move |icx| {
229         let (param_env, ty) = query.into_parts();
230         debug!(?ty);
231
232         if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
233             tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
234         }
235
236         // Update the ImplicitCtxt to increase the layout_depth
237         let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
238
239         ty::tls::enter_context(&icx, |_| {
240             let param_env = param_env.with_reveal_all_normalized(tcx);
241             let unnormalized_ty = ty;
242
243             // FIXME: We might want to have two different versions of `layout_of`:
244             // One that can be called after typecheck has completed and can use
245             // `normalize_erasing_regions` here and another one that can be called
246             // before typecheck has completed and uses `try_normalize_erasing_regions`.
247             let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
248                 Ok(t) => t,
249                 Err(normalization_error) => {
250                     return Err(LayoutError::NormalizationFailure(ty, normalization_error));
251                 }
252             };
253
254             if ty != unnormalized_ty {
255                 // Ensure this layout is also cached for the normalized type.
256                 return tcx.layout_of(param_env.and(ty));
257             }
258
259             let cx = LayoutCx { tcx, param_env };
260
261             let layout = cx.layout_of_uncached(ty)?;
262             let layout = TyAndLayout { ty, layout };
263
264             cx.record_layout_for_printing(layout);
265
266             // Type-level uninhabitedness should always imply ABI uninhabitedness.
267             if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
268                 assert!(layout.abi.is_uninhabited());
269             }
270
271             Ok(layout)
272         })
273     })
274 }
275
276 pub struct LayoutCx<'tcx, C> {
277     pub tcx: C,
278     pub param_env: ty::ParamEnv<'tcx>,
279 }
280
281 #[derive(Copy, Clone, Debug)]
282 enum StructKind {
283     /// A tuple, closure, or univariant which cannot be coerced to unsized.
284     AlwaysSized,
285     /// A univariant, the last field of which may be coerced to unsized.
286     MaybeUnsized,
287     /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
288     Prefixed(Size, Align),
289 }
290
291 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
292 // This is used to go between `memory_index` (source field order to memory order)
293 // and `inverse_memory_index` (memory order to source field order).
294 // See also `FieldsShape::Arbitrary::memory_index` for more details.
295 // FIXME(eddyb) build a better abstraction for permutations, if possible.
296 fn invert_mapping(map: &[u32]) -> Vec<u32> {
297     let mut inverse = vec![0; map.len()];
298     for i in 0..map.len() {
299         inverse[map[i] as usize] = i as u32;
300     }
301     inverse
302 }
303
304 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
305     fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
306         let dl = self.data_layout();
307         let b_align = b.align(dl);
308         let align = a.align(dl).max(b_align).max(dl.aggregate_align);
309         let b_offset = a.size(dl).align_to(b_align.abi);
310         let size = (b_offset + b.size(dl)).align_to(align.abi);
311
312         // HACK(nox): We iter on `b` and then `a` because `max_by_key`
313         // returns the last maximum.
314         let largest_niche = Niche::from_scalar(dl, b_offset, b)
315             .into_iter()
316             .chain(Niche::from_scalar(dl, Size::ZERO, a))
317             .max_by_key(|niche| niche.available(dl));
318
319         LayoutS {
320             variants: Variants::Single { index: VariantIdx::new(0) },
321             fields: FieldsShape::Arbitrary {
322                 offsets: vec![Size::ZERO, b_offset],
323                 memory_index: vec![0, 1],
324             },
325             abi: Abi::ScalarPair(a, b),
326             largest_niche,
327             align,
328             size,
329         }
330     }
331
332     fn univariant_uninterned(
333         &self,
334         ty: Ty<'tcx>,
335         fields: &[TyAndLayout<'_>],
336         repr: &ReprOptions,
337         kind: StructKind,
338     ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
339         let dl = self.data_layout();
340         let pack = repr.pack;
341         if pack.is_some() && repr.align.is_some() {
342             self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
343             return Err(LayoutError::Unknown(ty));
344         }
345
346         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
347
348         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
349
350         let optimize = !repr.inhibit_struct_field_reordering_opt();
351         if optimize {
352             let end =
353                 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
354             let optimizing = &mut inverse_memory_index[..end];
355             let field_align = |f: &TyAndLayout<'_>| {
356                 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
357             };
358
359             // If `-Z randomize-layout` was enabled for the type definition we can shuffle
360             // the field ordering to try and catch some code making assumptions about layouts
361             // we don't guarantee
362             if repr.can_randomize_type_layout() {
363                 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
364                 // randomize field ordering with
365                 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
366
367                 // Shuffle the ordering of the fields
368                 optimizing.shuffle(&mut rng);
369
370             // Otherwise we just leave things alone and actually optimize the type's fields
371             } else {
372                 match kind {
373                     StructKind::AlwaysSized | StructKind::MaybeUnsized => {
374                         optimizing.sort_by_key(|&x| {
375                             // Place ZSTs first to avoid "interesting offsets",
376                             // especially with only one or two non-ZST fields.
377                             let f = &fields[x as usize];
378                             (!f.is_zst(), cmp::Reverse(field_align(f)))
379                         });
380                     }
381
382                     StructKind::Prefixed(..) => {
383                         // Sort in ascending alignment so that the layout stays optimal
384                         // regardless of the prefix
385                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
386                     }
387                 }
388
389                 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
390                 //                 regardless of the status of `-Z randomize-layout`
391             }
392         }
393
394         // inverse_memory_index holds field indices by increasing memory offset.
395         // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
396         // We now write field offsets to the corresponding offset slot;
397         // field 5 with offset 0 puts 0 in offsets[5].
398         // At the bottom of this function, we invert `inverse_memory_index` to
399         // produce `memory_index` (see `invert_mapping`).
400
401         let mut sized = true;
402         let mut offsets = vec![Size::ZERO; fields.len()];
403         let mut offset = Size::ZERO;
404         let mut largest_niche = None;
405         let mut largest_niche_available = 0;
406
407         if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
408             let prefix_align =
409                 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
410             align = align.max(AbiAndPrefAlign::new(prefix_align));
411             offset = prefix_size.align_to(prefix_align);
412         }
413
414         for &i in &inverse_memory_index {
415             let field = fields[i as usize];
416             if !sized {
417                 self.tcx.sess.delay_span_bug(
418                     DUMMY_SP,
419                     &format!(
420                         "univariant: field #{} of `{}` comes after unsized field",
421                         offsets.len(),
422                         ty
423                     ),
424                 );
425             }
426
427             if field.is_unsized() {
428                 sized = false;
429             }
430
431             // Invariant: offset < dl.obj_size_bound() <= 1<<61
432             let field_align = if let Some(pack) = pack {
433                 field.align.min(AbiAndPrefAlign::new(pack))
434             } else {
435                 field.align
436             };
437             offset = offset.align_to(field_align.abi);
438             align = align.max(field_align);
439
440             debug!("univariant offset: {:?} field: {:#?}", offset, field);
441             offsets[i as usize] = offset;
442
443             if !repr.hide_niche() {
444                 if let Some(mut niche) = field.largest_niche {
445                     let available = niche.available(dl);
446                     if available > largest_niche_available {
447                         largest_niche_available = available;
448                         niche.offset += offset;
449                         largest_niche = Some(niche);
450                     }
451                 }
452             }
453
454             offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
455         }
456
457         if let Some(repr_align) = repr.align {
458             align = align.max(AbiAndPrefAlign::new(repr_align));
459         }
460
461         debug!("univariant min_size: {:?}", offset);
462         let min_size = offset;
463
464         // As stated above, inverse_memory_index holds field indices by increasing offset.
465         // This makes it an already-sorted view of the offsets vec.
466         // To invert it, consider:
467         // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
468         // Field 5 would be the first element, so memory_index is i:
469         // Note: if we didn't optimize, it's already right.
470
471         let memory_index =
472             if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
473
474         let size = min_size.align_to(align.abi);
475         let mut abi = Abi::Aggregate { sized };
476
477         // Unpack newtype ABIs and find scalar pairs.
478         if sized && size.bytes() > 0 {
479             // All other fields must be ZSTs.
480             let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
481
482             match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
483                 // We have exactly one non-ZST field.
484                 (Some((i, field)), None, None) => {
485                     // Field fills the struct and it has a scalar or scalar pair ABI.
486                     if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
487                     {
488                         match field.abi {
489                             // For plain scalars, or vectors of them, we can't unpack
490                             // newtypes for `#[repr(C)]`, as that affects C ABIs.
491                             Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
492                                 abi = field.abi;
493                             }
494                             // But scalar pairs are Rust-specific and get
495                             // treated as aggregates by C ABIs anyway.
496                             Abi::ScalarPair(..) => {
497                                 abi = field.abi;
498                             }
499                             _ => {}
500                         }
501                     }
502                 }
503
504                 // Two non-ZST fields, and they're both scalars.
505                 (Some((i, a)), Some((j, b)), None) => {
506                     match (a.abi, b.abi) {
507                         (Abi::Scalar(a), Abi::Scalar(b)) => {
508                             // Order by the memory placement, not source order.
509                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
510                                 ((i, a), (j, b))
511                             } else {
512                                 ((j, b), (i, a))
513                             };
514                             let pair = self.scalar_pair(a, b);
515                             let pair_offsets = match pair.fields {
516                                 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
517                                     assert_eq!(memory_index, &[0, 1]);
518                                     offsets
519                                 }
520                                 _ => bug!(),
521                             };
522                             if offsets[i] == pair_offsets[0]
523                                 && offsets[j] == pair_offsets[1]
524                                 && align == pair.align
525                                 && size == pair.size
526                             {
527                                 // We can use `ScalarPair` only when it matches our
528                                 // already computed layout (including `#[repr(C)]`).
529                                 abi = pair.abi;
530                             }
531                         }
532                         _ => {}
533                     }
534                 }
535
536                 _ => {}
537             }
538         }
539
540         if fields.iter().any(|f| f.abi.is_uninhabited()) {
541             abi = Abi::Uninhabited;
542         }
543
544         Ok(LayoutS {
545             variants: Variants::Single { index: VariantIdx::new(0) },
546             fields: FieldsShape::Arbitrary { offsets, memory_index },
547             abi,
548             largest_niche,
549             align,
550             size,
551         })
552     }
553
554     fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
555         let tcx = self.tcx;
556         let param_env = self.param_env;
557         let dl = self.data_layout();
558         let scalar_unit = |value: Primitive| {
559             let size = value.size(dl);
560             assert!(size.bits() <= 128);
561             Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
562         };
563         let scalar =
564             |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
565
566         let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
567             Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
568         };
569         debug_assert!(!ty.has_infer_types_or_consts());
570
571         Ok(match *ty.kind() {
572             // Basic scalars.
573             ty::Bool => tcx.intern_layout(LayoutS::scalar(
574                 self,
575                 Scalar::Initialized {
576                     value: Int(I8, false),
577                     valid_range: WrappingRange { start: 0, end: 1 },
578                 },
579             )),
580             ty::Char => tcx.intern_layout(LayoutS::scalar(
581                 self,
582                 Scalar::Initialized {
583                     value: Int(I32, false),
584                     valid_range: WrappingRange { start: 0, end: 0x10FFFF },
585                 },
586             )),
587             ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
588             ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
589             ty::Float(fty) => scalar(match fty {
590                 ty::FloatTy::F32 => F32,
591                 ty::FloatTy::F64 => F64,
592             }),
593             ty::FnPtr(_) => {
594                 let mut ptr = scalar_unit(Pointer);
595                 ptr.valid_range_mut().start = 1;
596                 tcx.intern_layout(LayoutS::scalar(self, ptr))
597             }
598
599             // The never type.
600             ty::Never => tcx.intern_layout(LayoutS {
601                 variants: Variants::Single { index: VariantIdx::new(0) },
602                 fields: FieldsShape::Primitive,
603                 abi: Abi::Uninhabited,
604                 largest_niche: None,
605                 align: dl.i8_align,
606                 size: Size::ZERO,
607             }),
608
609             // Potentially-wide pointers.
610             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
611                 let mut data_ptr = scalar_unit(Pointer);
612                 if !ty.is_unsafe_ptr() {
613                     data_ptr.valid_range_mut().start = 1;
614                 }
615
616                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
617                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
618                     return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
619                 }
620
621                 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
622                 let metadata = match unsized_part.kind() {
623                     ty::Foreign(..) => {
624                         return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
625                     }
626                     ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
627                     ty::Dynamic(..) => {
628                         let mut vtable = scalar_unit(Pointer);
629                         vtable.valid_range_mut().start = 1;
630                         vtable
631                     }
632                     _ => return Err(LayoutError::Unknown(unsized_part)),
633                 };
634
635                 // Effectively a (ptr, meta) tuple.
636                 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
637             }
638
639             // Arrays and slices.
640             ty::Array(element, mut count) => {
641                 if count.has_projections() {
642                     count = tcx.normalize_erasing_regions(param_env, count);
643                     if count.has_projections() {
644                         return Err(LayoutError::Unknown(ty));
645                     }
646                 }
647
648                 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
649                 let element = self.layout_of(element)?;
650                 let size =
651                     element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
652
653                 let abi =
654                     if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
655                         Abi::Uninhabited
656                     } else {
657                         Abi::Aggregate { sized: true }
658                     };
659
660                 let largest_niche = if count != 0 { element.largest_niche } else { None };
661
662                 tcx.intern_layout(LayoutS {
663                     variants: Variants::Single { index: VariantIdx::new(0) },
664                     fields: FieldsShape::Array { stride: element.size, count },
665                     abi,
666                     largest_niche,
667                     align: element.align,
668                     size,
669                 })
670             }
671             ty::Slice(element) => {
672                 let element = self.layout_of(element)?;
673                 tcx.intern_layout(LayoutS {
674                     variants: Variants::Single { index: VariantIdx::new(0) },
675                     fields: FieldsShape::Array { stride: element.size, count: 0 },
676                     abi: Abi::Aggregate { sized: false },
677                     largest_niche: None,
678                     align: element.align,
679                     size: Size::ZERO,
680                 })
681             }
682             ty::Str => tcx.intern_layout(LayoutS {
683                 variants: Variants::Single { index: VariantIdx::new(0) },
684                 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
685                 abi: Abi::Aggregate { sized: false },
686                 largest_niche: None,
687                 align: dl.i8_align,
688                 size: Size::ZERO,
689             }),
690
691             // Odd unit types.
692             ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
693             ty::Dynamic(..) | ty::Foreign(..) => {
694                 let mut unit = self.univariant_uninterned(
695                     ty,
696                     &[],
697                     &ReprOptions::default(),
698                     StructKind::AlwaysSized,
699                 )?;
700                 match unit.abi {
701                     Abi::Aggregate { ref mut sized } => *sized = false,
702                     _ => bug!(),
703                 }
704                 tcx.intern_layout(unit)
705             }
706
707             ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
708
709             ty::Closure(_, ref substs) => {
710                 let tys = substs.as_closure().upvar_tys();
711                 univariant(
712                     &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
713                     &ReprOptions::default(),
714                     StructKind::AlwaysSized,
715                 )?
716             }
717
718             ty::Tuple(tys) => {
719                 let kind =
720                     if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
721
722                 univariant(
723                     &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
724                     &ReprOptions::default(),
725                     kind,
726                 )?
727             }
728
729             // SIMD vector types.
730             ty::Adt(def, substs) if def.repr().simd() => {
731                 if !def.is_struct() {
732                     // Should have yielded E0517 by now.
733                     tcx.sess.delay_span_bug(
734                         DUMMY_SP,
735                         "#[repr(simd)] was applied to an ADT that is not a struct",
736                     );
737                     return Err(LayoutError::Unknown(ty));
738                 }
739
740                 // Supported SIMD vectors are homogeneous ADTs with at least one field:
741                 //
742                 // * #[repr(simd)] struct S(T, T, T, T);
743                 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
744                 // * #[repr(simd)] struct S([T; 4])
745                 //
746                 // where T is a primitive scalar (integer/float/pointer).
747
748                 // SIMD vectors with zero fields are not supported.
749                 // (should be caught by typeck)
750                 if def.non_enum_variant().fields.is_empty() {
751                     tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
752                 }
753
754                 // Type of the first ADT field:
755                 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
756
757                 // Heterogeneous SIMD vectors are not supported:
758                 // (should be caught by typeck)
759                 for fi in &def.non_enum_variant().fields {
760                     if fi.ty(tcx, substs) != f0_ty {
761                         tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
762                     }
763                 }
764
765                 // The element type and number of elements of the SIMD vector
766                 // are obtained from:
767                 //
768                 // * the element type and length of the single array field, if
769                 // the first field is of array type, or
770                 //
771                 // * the homogenous field type and the number of fields.
772                 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
773                     // First ADT field is an array:
774
775                     // SIMD vectors with multiple array fields are not supported:
776                     // (should be caught by typeck)
777                     if def.non_enum_variant().fields.len() != 1 {
778                         tcx.sess.fatal(&format!(
779                             "monomorphising SIMD type `{}` with more than one array field",
780                             ty
781                         ));
782                     }
783
784                     // Extract the number of elements from the layout of the array field:
785                     let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
786                         return Err(LayoutError::Unknown(ty));
787                     };
788
789                     (*e_ty, *count, true)
790                 } else {
791                     // First ADT field is not an array:
792                     (f0_ty, def.non_enum_variant().fields.len() as _, false)
793                 };
794
795                 // SIMD vectors of zero length are not supported.
796                 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
797                 // support.
798                 //
799                 // Can't be caught in typeck if the array length is generic.
800                 if e_len == 0 {
801                     tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
802                 } else if e_len > MAX_SIMD_LANES {
803                     tcx.sess.fatal(&format!(
804                         "monomorphising SIMD type `{}` of length greater than {}",
805                         ty, MAX_SIMD_LANES,
806                     ));
807                 }
808
809                 // Compute the ABI of the element type:
810                 let e_ly = self.layout_of(e_ty)?;
811                 let Abi::Scalar(e_abi) = e_ly.abi else {
812                     // This error isn't caught in typeck, e.g., if
813                     // the element type of the vector is generic.
814                     tcx.sess.fatal(&format!(
815                         "monomorphising SIMD type `{}` with a non-primitive-scalar \
816                         (integer/float/pointer) element type `{}`",
817                         ty, e_ty
818                     ))
819                 };
820
821                 // Compute the size and alignment of the vector:
822                 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
823                 let align = dl.vector_align(size);
824                 let size = size.align_to(align.abi);
825
826                 // Compute the placement of the vector fields:
827                 let fields = if is_array {
828                     FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
829                 } else {
830                     FieldsShape::Array { stride: e_ly.size, count: e_len }
831                 };
832
833                 tcx.intern_layout(LayoutS {
834                     variants: Variants::Single { index: VariantIdx::new(0) },
835                     fields,
836                     abi: Abi::Vector { element: e_abi, count: e_len },
837                     largest_niche: e_ly.largest_niche,
838                     size,
839                     align,
840                 })
841             }
842
843             // ADTs.
844             ty::Adt(def, substs) => {
845                 // Cache the field layouts.
846                 let variants = def
847                     .variants()
848                     .iter()
849                     .map(|v| {
850                         v.fields
851                             .iter()
852                             .map(|field| self.layout_of(field.ty(tcx, substs)))
853                             .collect::<Result<Vec<_>, _>>()
854                     })
855                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
856
857                 if def.is_union() {
858                     if def.repr().pack.is_some() && def.repr().align.is_some() {
859                         self.tcx.sess.delay_span_bug(
860                             tcx.def_span(def.did()),
861                             "union cannot be packed and aligned",
862                         );
863                         return Err(LayoutError::Unknown(ty));
864                     }
865
866                     let mut align =
867                         if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
868
869                     if let Some(repr_align) = def.repr().align {
870                         align = align.max(AbiAndPrefAlign::new(repr_align));
871                     }
872
873                     let optimize = !def.repr().inhibit_union_abi_opt();
874                     let mut size = Size::ZERO;
875                     let mut abi = Abi::Aggregate { sized: true };
876                     let index = VariantIdx::new(0);
877                     for field in &variants[index] {
878                         assert!(!field.is_unsized());
879                         align = align.max(field.align);
880
881                         // If all non-ZST fields have the same ABI, forward this ABI
882                         if optimize && !field.is_zst() {
883                             // Discard valid range information and allow undef
884                             let field_abi = match field.abi {
885                                 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
886                                 Abi::ScalarPair(x, y) => {
887                                     Abi::ScalarPair(x.to_union(), y.to_union())
888                                 }
889                                 Abi::Vector { element: x, count } => {
890                                     Abi::Vector { element: x.to_union(), count }
891                                 }
892                                 Abi::Uninhabited | Abi::Aggregate { .. } => {
893                                     Abi::Aggregate { sized: true }
894                                 }
895                             };
896
897                             if size == Size::ZERO {
898                                 // first non ZST: initialize 'abi'
899                                 abi = field_abi;
900                             } else if abi != field_abi {
901                                 // different fields have different ABI: reset to Aggregate
902                                 abi = Abi::Aggregate { sized: true };
903                             }
904                         }
905
906                         size = cmp::max(size, field.size);
907                     }
908
909                     if let Some(pack) = def.repr().pack {
910                         align = align.min(AbiAndPrefAlign::new(pack));
911                     }
912
913                     return Ok(tcx.intern_layout(LayoutS {
914                         variants: Variants::Single { index },
915                         fields: FieldsShape::Union(
916                             NonZeroUsize::new(variants[index].len())
917                                 .ok_or(LayoutError::Unknown(ty))?,
918                         ),
919                         abi,
920                         largest_niche: None,
921                         align,
922                         size: size.align_to(align.abi),
923                     }));
924                 }
925
926                 // A variant is absent if it's uninhabited and only has ZST fields.
927                 // Present uninhabited variants only require space for their fields,
928                 // but *not* an encoding of the discriminant (e.g., a tag value).
929                 // See issue #49298 for more details on the need to leave space
930                 // for non-ZST uninhabited data (mostly partial initialization).
931                 let absent = |fields: &[TyAndLayout<'_>]| {
932                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
933                     let is_zst = fields.iter().all(|f| f.is_zst());
934                     uninhabited && is_zst
935                 };
936                 let (present_first, present_second) = {
937                     let mut present_variants = variants
938                         .iter_enumerated()
939                         .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
940                     (present_variants.next(), present_variants.next())
941                 };
942                 let present_first = match present_first {
943                     Some(present_first) => present_first,
944                     // Uninhabited because it has no variants, or only absent ones.
945                     None if def.is_enum() => {
946                         return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
947                     }
948                     // If it's a struct, still compute a layout so that we can still compute the
949                     // field offsets.
950                     None => VariantIdx::new(0),
951                 };
952
953                 let is_struct = !def.is_enum() ||
954                     // Only one variant is present.
955                     (present_second.is_none() &&
956                     // Representation optimizations are allowed.
957                     !def.repr().inhibit_enum_layout_opt());
958                 if is_struct {
959                     // Struct, or univariant enum equivalent to a struct.
960                     // (Typechecking will reject discriminant-sizing attrs.)
961
962                     let v = present_first;
963                     let kind = if def.is_enum() || variants[v].is_empty() {
964                         StructKind::AlwaysSized
965                     } else {
966                         let param_env = tcx.param_env(def.did());
967                         let last_field = def.variant(v).fields.last().unwrap();
968                         let always_sized =
969                             tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
970                         if !always_sized {
971                             StructKind::MaybeUnsized
972                         } else {
973                             StructKind::AlwaysSized
974                         }
975                     };
976
977                     let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
978                     st.variants = Variants::Single { index: v };
979                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
980                     match st.abi {
981                         Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
982                             // the asserts ensure that we are not using the
983                             // `#[rustc_layout_scalar_valid_range(n)]`
984                             // attribute to widen the range of anything as that would probably
985                             // result in UB somewhere
986                             // FIXME(eddyb) the asserts are probably not needed,
987                             // as larger validity ranges would result in missed
988                             // optimizations, *not* wrongly assuming the inner
989                             // value is valid. e.g. unions enlarge validity ranges,
990                             // because the values may be uninitialized.
991                             if let Bound::Included(start) = start {
992                                 // FIXME(eddyb) this might be incorrect - it doesn't
993                                 // account for wrap-around (end < start) ranges.
994                                 let valid_range = scalar.valid_range_mut();
995                                 assert!(valid_range.start <= start);
996                                 valid_range.start = start;
997                             }
998                             if let Bound::Included(end) = end {
999                                 // FIXME(eddyb) this might be incorrect - it doesn't
1000                                 // account for wrap-around (end < start) ranges.
1001                                 let valid_range = scalar.valid_range_mut();
1002                                 assert!(valid_range.end >= end);
1003                                 valid_range.end = end;
1004                             }
1005
1006                             // Update `largest_niche` if we have introduced a larger niche.
1007                             let niche = if def.repr().hide_niche() {
1008                                 None
1009                             } else {
1010                                 Niche::from_scalar(dl, Size::ZERO, *scalar)
1011                             };
1012                             if let Some(niche) = niche {
1013                                 match st.largest_niche {
1014                                     Some(largest_niche) => {
1015                                         // Replace the existing niche even if they're equal,
1016                                         // because this one is at a lower offset.
1017                                         if largest_niche.available(dl) <= niche.available(dl) {
1018                                             st.largest_niche = Some(niche);
1019                                         }
1020                                     }
1021                                     None => st.largest_niche = Some(niche),
1022                                 }
1023                             }
1024                         }
1025                         _ => assert!(
1026                             start == Bound::Unbounded && end == Bound::Unbounded,
1027                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1028                             def,
1029                             st,
1030                         ),
1031                     }
1032
1033                     return Ok(tcx.intern_layout(st));
1034                 }
1035
1036                 // At this point, we have handled all unions and
1037                 // structs. (We have also handled univariant enums
1038                 // that allow representation optimization.)
1039                 assert!(def.is_enum());
1040
1041                 // The current code for niche-filling relies on variant indices
1042                 // instead of actual discriminants, so dataful enums with
1043                 // explicit discriminants (RFC #2363) would misbehave.
1044                 let no_explicit_discriminants = def
1045                     .variants()
1046                     .iter_enumerated()
1047                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1048
1049                 let mut niche_filling_layout = None;
1050
1051                 // Niche-filling enum optimization.
1052                 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1053                     let mut dataful_variant = None;
1054                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1055
1056                     // Find one non-ZST variant.
1057                     'variants: for (v, fields) in variants.iter_enumerated() {
1058                         if absent(fields) {
1059                             continue 'variants;
1060                         }
1061                         for f in fields {
1062                             if !f.is_zst() {
1063                                 if dataful_variant.is_none() {
1064                                     dataful_variant = Some(v);
1065                                     continue 'variants;
1066                                 } else {
1067                                     dataful_variant = None;
1068                                     break 'variants;
1069                                 }
1070                             }
1071                         }
1072                         niche_variants = *niche_variants.start().min(&v)..=v;
1073                     }
1074
1075                     if niche_variants.start() > niche_variants.end() {
1076                         dataful_variant = None;
1077                     }
1078
1079                     if let Some(i) = dataful_variant {
1080                         let count = (niche_variants.end().as_u32()
1081                             - niche_variants.start().as_u32()
1082                             + 1) as u128;
1083
1084                         // Find the field with the largest niche
1085                         let niche_candidate = variants[i]
1086                             .iter()
1087                             .enumerate()
1088                             .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1089                             .max_by_key(|(_, niche)| niche.available(dl));
1090
1091                         if let Some((field_index, niche, (niche_start, niche_scalar))) =
1092                             niche_candidate.and_then(|(field_index, niche)| {
1093                                 Some((field_index, niche, niche.reserve(self, count)?))
1094                             })
1095                         {
1096                             let mut align = dl.aggregate_align;
1097                             let st = variants
1098                                 .iter_enumerated()
1099                                 .map(|(j, v)| {
1100                                     let mut st = self.univariant_uninterned(
1101                                         ty,
1102                                         v,
1103                                         &def.repr(),
1104                                         StructKind::AlwaysSized,
1105                                     )?;
1106                                     st.variants = Variants::Single { index: j };
1107
1108                                     align = align.max(st.align);
1109
1110                                     Ok(tcx.intern_layout(st))
1111                                 })
1112                                 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1113
1114                             let offset = st[i].fields().offset(field_index) + niche.offset;
1115                             let size = st[i].size();
1116
1117                             let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1118                                 Abi::Uninhabited
1119                             } else {
1120                                 match st[i].abi() {
1121                                     Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1122                                     Abi::ScalarPair(first, second) => {
1123                                         // Only the niche is guaranteed to be initialised,
1124                                         // so use union layout for the other primitive.
1125                                         if offset.bytes() == 0 {
1126                                             Abi::ScalarPair(niche_scalar, second.to_union())
1127                                         } else {
1128                                             Abi::ScalarPair(first.to_union(), niche_scalar)
1129                                         }
1130                                     }
1131                                     _ => Abi::Aggregate { sized: true },
1132                                 }
1133                             };
1134
1135                             let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1136
1137                             niche_filling_layout = Some(LayoutS {
1138                                 variants: Variants::Multiple {
1139                                     tag: niche_scalar,
1140                                     tag_encoding: TagEncoding::Niche {
1141                                         dataful_variant: i,
1142                                         niche_variants,
1143                                         niche_start,
1144                                     },
1145                                     tag_field: 0,
1146                                     variants: st,
1147                                 },
1148                                 fields: FieldsShape::Arbitrary {
1149                                     offsets: vec![offset],
1150                                     memory_index: vec![0],
1151                                 },
1152                                 abi,
1153                                 largest_niche,
1154                                 size,
1155                                 align,
1156                             });
1157                         }
1158                     }
1159                 }
1160
1161                 let (mut min, mut max) = (i128::MAX, i128::MIN);
1162                 let discr_type = def.repr().discr_type();
1163                 let bits = Integer::from_attr(self, discr_type).size().bits();
1164                 for (i, discr) in def.discriminants(tcx) {
1165                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1166                         continue;
1167                     }
1168                     let mut x = discr.val as i128;
1169                     if discr_type.is_signed() {
1170                         // sign extend the raw representation to be an i128
1171                         x = (x << (128 - bits)) >> (128 - bits);
1172                     }
1173                     if x < min {
1174                         min = x;
1175                     }
1176                     if x > max {
1177                         max = x;
1178                     }
1179                 }
1180                 // We might have no inhabited variants, so pretend there's at least one.
1181                 if (min, max) == (i128::MAX, i128::MIN) {
1182                     min = 0;
1183                     max = 0;
1184                 }
1185                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1186                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1187
1188                 let mut align = dl.aggregate_align;
1189                 let mut size = Size::ZERO;
1190
1191                 // We're interested in the smallest alignment, so start large.
1192                 let mut start_align = Align::from_bytes(256).unwrap();
1193                 assert_eq!(Integer::for_align(dl, start_align), None);
1194
1195                 // repr(C) on an enum tells us to make a (tag, union) layout,
1196                 // so we need to grow the prefix alignment to be at least
1197                 // the alignment of the union. (This value is used both for
1198                 // determining the alignment of the overall enum, and the
1199                 // determining the alignment of the payload after the tag.)
1200                 let mut prefix_align = min_ity.align(dl).abi;
1201                 if def.repr().c() {
1202                     for fields in &variants {
1203                         for field in fields {
1204                             prefix_align = prefix_align.max(field.align.abi);
1205                         }
1206                     }
1207                 }
1208
1209                 // Create the set of structs that represent each variant.
1210                 let mut layout_variants = variants
1211                     .iter_enumerated()
1212                     .map(|(i, field_layouts)| {
1213                         let mut st = self.univariant_uninterned(
1214                             ty,
1215                             &field_layouts,
1216                             &def.repr(),
1217                             StructKind::Prefixed(min_ity.size(), prefix_align),
1218                         )?;
1219                         st.variants = Variants::Single { index: i };
1220                         // Find the first field we can't move later
1221                         // to make room for a larger discriminant.
1222                         for field in
1223                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1224                         {
1225                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1226                                 start_align = start_align.min(field.align.abi);
1227                                 break;
1228                             }
1229                         }
1230                         size = cmp::max(size, st.size);
1231                         align = align.max(st.align);
1232                         Ok(st)
1233                     })
1234                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1235
1236                 // Align the maximum variant size to the largest alignment.
1237                 size = size.align_to(align.abi);
1238
1239                 if size.bytes() >= dl.obj_size_bound() {
1240                     return Err(LayoutError::SizeOverflow(ty));
1241                 }
1242
1243                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1244                 if typeck_ity < min_ity {
1245                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1246                     // some reason at this point (based on values discriminant can take on). Mostly
1247                     // because this discriminant will be loaded, and then stored into variable of
1248                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1249                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1250                     // discriminant values. That would be a bug, because then, in codegen, in order
1251                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1252                     // space necessary to represent would have to be discarded (or layout is wrong
1253                     // on thinking it needs 16 bits)
1254                     bug!(
1255                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1256                         min_ity,
1257                         typeck_ity
1258                     );
1259                     // However, it is fine to make discr type however large (as an optimisation)
1260                     // after this point â€“ we’ll just truncate the value we load in codegen.
1261                 }
1262
1263                 // Check to see if we should use a different type for the
1264                 // discriminant. We can safely use a type with the same size
1265                 // as the alignment of the first field of each variant.
1266                 // We increase the size of the discriminant to avoid LLVM copying
1267                 // padding when it doesn't need to. This normally causes unaligned
1268                 // load/stores and excessive memcpy/memset operations. By using a
1269                 // bigger integer size, LLVM can be sure about its contents and
1270                 // won't be so conservative.
1271
1272                 // Use the initial field alignment
1273                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1274                     min_ity
1275                 } else {
1276                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1277                 };
1278
1279                 // If the alignment is not larger than the chosen discriminant size,
1280                 // don't use the alignment as the final size.
1281                 if ity <= min_ity {
1282                     ity = min_ity;
1283                 } else {
1284                     // Patch up the variants' first few fields.
1285                     let old_ity_size = min_ity.size();
1286                     let new_ity_size = ity.size();
1287                     for variant in &mut layout_variants {
1288                         match variant.fields {
1289                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1290                                 for i in offsets {
1291                                     if *i <= old_ity_size {
1292                                         assert_eq!(*i, old_ity_size);
1293                                         *i = new_ity_size;
1294                                     }
1295                                 }
1296                                 // We might be making the struct larger.
1297                                 if variant.size <= old_ity_size {
1298                                     variant.size = new_ity_size;
1299                                 }
1300                             }
1301                             _ => bug!(),
1302                         }
1303                     }
1304                 }
1305
1306                 let tag_mask = ity.size().unsigned_int_max();
1307                 let tag = Scalar::Initialized {
1308                     value: Int(ity, signed),
1309                     valid_range: WrappingRange {
1310                         start: (min as u128 & tag_mask),
1311                         end: (max as u128 & tag_mask),
1312                     },
1313                 };
1314                 let mut abi = Abi::Aggregate { sized: true };
1315
1316                 // Without latter check aligned enums with custom discriminant values
1317                 // Would result in ICE see the issue #92464 for more info
1318                 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1319                     abi = Abi::Scalar(tag);
1320                 } else {
1321                     // Try to use a ScalarPair for all tagged enums.
1322                     let mut common_prim = None;
1323                     let mut common_prim_initialized_in_all_variants = true;
1324                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1325                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1326                             bug!();
1327                         };
1328                         let mut fields =
1329                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1330                         let (field, offset) = match (fields.next(), fields.next()) {
1331                             (None, None) => {
1332                                 common_prim_initialized_in_all_variants = false;
1333                                 continue;
1334                             }
1335                             (Some(pair), None) => pair,
1336                             _ => {
1337                                 common_prim = None;
1338                                 break;
1339                             }
1340                         };
1341                         let prim = match field.abi {
1342                             Abi::Scalar(scalar) => {
1343                                 common_prim_initialized_in_all_variants &=
1344                                     matches!(scalar, Scalar::Initialized { .. });
1345                                 scalar.primitive()
1346                             }
1347                             _ => {
1348                                 common_prim = None;
1349                                 break;
1350                             }
1351                         };
1352                         if let Some(pair) = common_prim {
1353                             // This is pretty conservative. We could go fancier
1354                             // by conflating things like i32 and u32, or even
1355                             // realising that (u8, u8) could just cohabit with
1356                             // u16 or even u32.
1357                             if pair != (prim, offset) {
1358                                 common_prim = None;
1359                                 break;
1360                             }
1361                         } else {
1362                             common_prim = Some((prim, offset));
1363                         }
1364                     }
1365                     if let Some((prim, offset)) = common_prim {
1366                         let prim_scalar = if common_prim_initialized_in_all_variants {
1367                             scalar_unit(prim)
1368                         } else {
1369                             // Common prim might be uninit.
1370                             Scalar::Union { value: prim }
1371                         };
1372                         let pair = self.scalar_pair(tag, prim_scalar);
1373                         let pair_offsets = match pair.fields {
1374                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1375                                 assert_eq!(memory_index, &[0, 1]);
1376                                 offsets
1377                             }
1378                             _ => bug!(),
1379                         };
1380                         if pair_offsets[0] == Size::ZERO
1381                             && pair_offsets[1] == *offset
1382                             && align == pair.align
1383                             && size == pair.size
1384                         {
1385                             // We can use `ScalarPair` only when it matches our
1386                             // already computed layout (including `#[repr(C)]`).
1387                             abi = pair.abi;
1388                         }
1389                     }
1390                 }
1391
1392                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1393                     abi = Abi::Uninhabited;
1394                 }
1395
1396                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1397
1398                 let layout_variants =
1399                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1400
1401                 let tagged_layout = LayoutS {
1402                     variants: Variants::Multiple {
1403                         tag,
1404                         tag_encoding: TagEncoding::Direct,
1405                         tag_field: 0,
1406                         variants: layout_variants,
1407                     },
1408                     fields: FieldsShape::Arbitrary {
1409                         offsets: vec![Size::ZERO],
1410                         memory_index: vec![0],
1411                     },
1412                     largest_niche,
1413                     abi,
1414                     align,
1415                     size,
1416                 };
1417
1418                 let best_layout = match (tagged_layout, niche_filling_layout) {
1419                     (tagged_layout, Some(niche_filling_layout)) => {
1420                         // Pick the smaller layout; otherwise,
1421                         // pick the layout with the larger niche; otherwise,
1422                         // pick tagged as it has simpler codegen.
1423                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1424                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1425                             (layout.size, cmp::Reverse(niche_size))
1426                         })
1427                     }
1428                     (tagged_layout, None) => tagged_layout,
1429                 };
1430
1431                 tcx.intern_layout(best_layout)
1432             }
1433
1434             // Types with no meaningful known layout.
1435             ty::Projection(_) | ty::Opaque(..) => {
1436                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1437                 // if that was possible, so there's no reason to try again here.
1438                 return Err(LayoutError::Unknown(ty));
1439             }
1440
1441             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1442                 bug!("Layout::compute: unexpected type `{}`", ty)
1443             }
1444
1445             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1446                 return Err(LayoutError::Unknown(ty));
1447             }
1448         })
1449     }
1450 }
1451
1452 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1453 #[derive(Clone, Debug, PartialEq)]
1454 enum SavedLocalEligibility {
1455     Unassigned,
1456     Assigned(VariantIdx),
1457     // FIXME: Use newtype_index so we aren't wasting bytes
1458     Ineligible(Option<u32>),
1459 }
1460
1461 // When laying out generators, we divide our saved local fields into two
1462 // categories: overlap-eligible and overlap-ineligible.
1463 //
1464 // Those fields which are ineligible for overlap go in a "prefix" at the
1465 // beginning of the layout, and always have space reserved for them.
1466 //
1467 // Overlap-eligible fields are only assigned to one variant, so we lay
1468 // those fields out for each variant and put them right after the
1469 // prefix.
1470 //
1471 // Finally, in the layout details, we point to the fields from the
1472 // variants they are assigned to. It is possible for some fields to be
1473 // included in multiple variants. No field ever "moves around" in the
1474 // layout; its offset is always the same.
1475 //
1476 // Also included in the layout are the upvars and the discriminant.
1477 // These are included as fields on the "outer" layout; they are not part
1478 // of any variant.
1479 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1480     /// Compute the eligibility and assignment of each local.
1481     fn generator_saved_local_eligibility(
1482         &self,
1483         info: &GeneratorLayout<'tcx>,
1484     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1485         use SavedLocalEligibility::*;
1486
1487         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1488             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1489
1490         // The saved locals not eligible for overlap. These will get
1491         // "promoted" to the prefix of our generator.
1492         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1493
1494         // Figure out which of our saved locals are fields in only
1495         // one variant. The rest are deemed ineligible for overlap.
1496         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1497             for local in fields {
1498                 match assignments[*local] {
1499                     Unassigned => {
1500                         assignments[*local] = Assigned(variant_index);
1501                     }
1502                     Assigned(idx) => {
1503                         // We've already seen this local at another suspension
1504                         // point, so it is no longer a candidate.
1505                         trace!(
1506                             "removing local {:?} in >1 variant ({:?}, {:?})",
1507                             local,
1508                             variant_index,
1509                             idx
1510                         );
1511                         ineligible_locals.insert(*local);
1512                         assignments[*local] = Ineligible(None);
1513                     }
1514                     Ineligible(_) => {}
1515                 }
1516             }
1517         }
1518
1519         // Next, check every pair of eligible locals to see if they
1520         // conflict.
1521         for local_a in info.storage_conflicts.rows() {
1522             let conflicts_a = info.storage_conflicts.count(local_a);
1523             if ineligible_locals.contains(local_a) {
1524                 continue;
1525             }
1526
1527             for local_b in info.storage_conflicts.iter(local_a) {
1528                 // local_a and local_b are storage live at the same time, therefore they
1529                 // cannot overlap in the generator layout. The only way to guarantee
1530                 // this is if they are in the same variant, or one is ineligible
1531                 // (which means it is stored in every variant).
1532                 if ineligible_locals.contains(local_b)
1533                     || assignments[local_a] == assignments[local_b]
1534                 {
1535                     continue;
1536                 }
1537
1538                 // If they conflict, we will choose one to make ineligible.
1539                 // This is not always optimal; it's just a greedy heuristic that
1540                 // seems to produce good results most of the time.
1541                 let conflicts_b = info.storage_conflicts.count(local_b);
1542                 let (remove, other) =
1543                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1544                 ineligible_locals.insert(remove);
1545                 assignments[remove] = Ineligible(None);
1546                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1547             }
1548         }
1549
1550         // Count the number of variants in use. If only one of them, then it is
1551         // impossible to overlap any locals in our layout. In this case it's
1552         // always better to make the remaining locals ineligible, so we can
1553         // lay them out with the other locals in the prefix and eliminate
1554         // unnecessary padding bytes.
1555         {
1556             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1557             for assignment in &assignments {
1558                 if let Assigned(idx) = assignment {
1559                     used_variants.insert(*idx);
1560                 }
1561             }
1562             if used_variants.count() < 2 {
1563                 for assignment in assignments.iter_mut() {
1564                     *assignment = Ineligible(None);
1565                 }
1566                 ineligible_locals.insert_all();
1567             }
1568         }
1569
1570         // Write down the order of our locals that will be promoted to the prefix.
1571         {
1572             for (idx, local) in ineligible_locals.iter().enumerate() {
1573                 assignments[local] = Ineligible(Some(idx as u32));
1574             }
1575         }
1576         debug!("generator saved local assignments: {:?}", assignments);
1577
1578         (ineligible_locals, assignments)
1579     }
1580
1581     /// Compute the full generator layout.
1582     fn generator_layout(
1583         &self,
1584         ty: Ty<'tcx>,
1585         def_id: hir::def_id::DefId,
1586         substs: SubstsRef<'tcx>,
1587     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1588         use SavedLocalEligibility::*;
1589         let tcx = self.tcx;
1590         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1591
1592         let Some(info) = tcx.generator_layout(def_id) else {
1593             return Err(LayoutError::Unknown(ty));
1594         };
1595         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1596
1597         // Build a prefix layout, including "promoting" all ineligible
1598         // locals as part of the prefix. We compute the layout of all of
1599         // these fields at once to get optimal packing.
1600         let tag_index = substs.as_generator().prefix_tys().count();
1601
1602         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1603         let max_discr = (info.variant_fields.len() - 1) as u128;
1604         let discr_int = Integer::fit_unsigned(max_discr);
1605         let discr_int_ty = discr_int.to_ty(tcx, false);
1606         let tag = Scalar::Initialized {
1607             value: Primitive::Int(discr_int, false),
1608             valid_range: WrappingRange { start: 0, end: max_discr },
1609         };
1610         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1611         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1612
1613         let promoted_layouts = ineligible_locals
1614             .iter()
1615             .map(|local| subst_field(info.field_tys[local]))
1616             .map(|ty| tcx.mk_maybe_uninit(ty))
1617             .map(|ty| self.layout_of(ty));
1618         let prefix_layouts = substs
1619             .as_generator()
1620             .prefix_tys()
1621             .map(|ty| self.layout_of(ty))
1622             .chain(iter::once(Ok(tag_layout)))
1623             .chain(promoted_layouts)
1624             .collect::<Result<Vec<_>, _>>()?;
1625         let prefix = self.univariant_uninterned(
1626             ty,
1627             &prefix_layouts,
1628             &ReprOptions::default(),
1629             StructKind::AlwaysSized,
1630         )?;
1631
1632         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1633
1634         // Split the prefix layout into the "outer" fields (upvars and
1635         // discriminant) and the "promoted" fields. Promoted fields will
1636         // get included in each variant that requested them in
1637         // GeneratorLayout.
1638         debug!("prefix = {:#?}", prefix);
1639         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1640             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1641                 let mut inverse_memory_index = invert_mapping(&memory_index);
1642
1643                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1644                 // "outer" and "promoted" fields respectively.
1645                 let b_start = (tag_index + 1) as u32;
1646                 let offsets_b = offsets.split_off(b_start as usize);
1647                 let offsets_a = offsets;
1648
1649                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1650                 // by preserving the order but keeping only one disjoint "half" each.
1651                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1652                 let inverse_memory_index_b: Vec<_> =
1653                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1654                 inverse_memory_index.retain(|&i| i < b_start);
1655                 let inverse_memory_index_a = inverse_memory_index;
1656
1657                 // Since `inverse_memory_index_{a,b}` each only refer to their
1658                 // respective fields, they can be safely inverted
1659                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1660                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1661
1662                 let outer_fields =
1663                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1664                 (outer_fields, offsets_b, memory_index_b)
1665             }
1666             _ => bug!(),
1667         };
1668
1669         let mut size = prefix.size;
1670         let mut align = prefix.align;
1671         let variants = info
1672             .variant_fields
1673             .iter_enumerated()
1674             .map(|(index, variant_fields)| {
1675                 // Only include overlap-eligible fields when we compute our variant layout.
1676                 let variant_only_tys = variant_fields
1677                     .iter()
1678                     .filter(|local| match assignments[**local] {
1679                         Unassigned => bug!(),
1680                         Assigned(v) if v == index => true,
1681                         Assigned(_) => bug!("assignment does not match variant"),
1682                         Ineligible(_) => false,
1683                     })
1684                     .map(|local| subst_field(info.field_tys[*local]));
1685
1686                 let mut variant = self.univariant_uninterned(
1687                     ty,
1688                     &variant_only_tys
1689                         .map(|ty| self.layout_of(ty))
1690                         .collect::<Result<Vec<_>, _>>()?,
1691                     &ReprOptions::default(),
1692                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1693                 )?;
1694                 variant.variants = Variants::Single { index };
1695
1696                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1697                     bug!();
1698                 };
1699
1700                 // Now, stitch the promoted and variant-only fields back together in
1701                 // the order they are mentioned by our GeneratorLayout.
1702                 // Because we only use some subset (that can differ between variants)
1703                 // of the promoted fields, we can't just pick those elements of the
1704                 // `promoted_memory_index` (as we'd end up with gaps).
1705                 // So instead, we build an "inverse memory_index", as if all of the
1706                 // promoted fields were being used, but leave the elements not in the
1707                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1708                 // obtain a valid (bijective) mapping.
1709                 const INVALID_FIELD_IDX: u32 = !0;
1710                 let mut combined_inverse_memory_index =
1711                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1712                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1713                 let combined_offsets = variant_fields
1714                     .iter()
1715                     .enumerate()
1716                     .map(|(i, local)| {
1717                         let (offset, memory_index) = match assignments[*local] {
1718                             Unassigned => bug!(),
1719                             Assigned(_) => {
1720                                 let (offset, memory_index) =
1721                                     offsets_and_memory_index.next().unwrap();
1722                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1723                             }
1724                             Ineligible(field_idx) => {
1725                                 let field_idx = field_idx.unwrap() as usize;
1726                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1727                             }
1728                         };
1729                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1730                         offset
1731                     })
1732                     .collect();
1733
1734                 // Remove the unused slots and invert the mapping to obtain the
1735                 // combined `memory_index` (also see previous comment).
1736                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1737                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1738
1739                 variant.fields = FieldsShape::Arbitrary {
1740                     offsets: combined_offsets,
1741                     memory_index: combined_memory_index,
1742                 };
1743
1744                 size = size.max(variant.size);
1745                 align = align.max(variant.align);
1746                 Ok(tcx.intern_layout(variant))
1747             })
1748             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1749
1750         size = size.align_to(align.abi);
1751
1752         let abi =
1753             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1754                 Abi::Uninhabited
1755             } else {
1756                 Abi::Aggregate { sized: true }
1757             };
1758
1759         let layout = tcx.intern_layout(LayoutS {
1760             variants: Variants::Multiple {
1761                 tag,
1762                 tag_encoding: TagEncoding::Direct,
1763                 tag_field: tag_index,
1764                 variants,
1765             },
1766             fields: outer_fields,
1767             abi,
1768             largest_niche: prefix.largest_niche,
1769             size,
1770             align,
1771         });
1772         debug!("generator layout ({:?}): {:#?}", ty, layout);
1773         Ok(layout)
1774     }
1775
1776     /// This is invoked by the `layout_of` query to record the final
1777     /// layout of each type.
1778     #[inline(always)]
1779     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1780         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1781         // for dumping later.
1782         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1783             self.record_layout_for_printing_outlined(layout)
1784         }
1785     }
1786
1787     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1788         // Ignore layouts that are done with non-empty environments or
1789         // non-monomorphic layouts, as the user only wants to see the stuff
1790         // resulting from the final codegen session.
1791         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1792             return;
1793         }
1794
1795         // (delay format until we actually need it)
1796         let record = |kind, packed, opt_discr_size, variants| {
1797             let type_desc = format!("{:?}", layout.ty);
1798             self.tcx.sess.code_stats.record_type_size(
1799                 kind,
1800                 type_desc,
1801                 layout.align.abi,
1802                 layout.size,
1803                 packed,
1804                 opt_discr_size,
1805                 variants,
1806             );
1807         };
1808
1809         let adt_def = match *layout.ty.kind() {
1810             ty::Adt(ref adt_def, _) => {
1811                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1812                 adt_def
1813             }
1814
1815             ty::Closure(..) => {
1816                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1817                 record(DataTypeKind::Closure, false, None, vec![]);
1818                 return;
1819             }
1820
1821             _ => {
1822                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1823                 return;
1824             }
1825         };
1826
1827         let adt_kind = adt_def.adt_kind();
1828         let adt_packed = adt_def.repr().pack.is_some();
1829
1830         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1831             let mut min_size = Size::ZERO;
1832             let field_info: Vec<_> = flds
1833                 .iter()
1834                 .enumerate()
1835                 .map(|(i, &name)| {
1836                     let field_layout = layout.field(self, i);
1837                     let offset = layout.fields.offset(i);
1838                     let field_end = offset + field_layout.size;
1839                     if min_size < field_end {
1840                         min_size = field_end;
1841                     }
1842                     FieldInfo {
1843                         name: name.to_string(),
1844                         offset: offset.bytes(),
1845                         size: field_layout.size.bytes(),
1846                         align: field_layout.align.abi.bytes(),
1847                     }
1848                 })
1849                 .collect();
1850
1851             VariantInfo {
1852                 name: n.map(|n| n.to_string()),
1853                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1854                 align: layout.align.abi.bytes(),
1855                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1856                 fields: field_info,
1857             }
1858         };
1859
1860         match layout.variants {
1861             Variants::Single { index } => {
1862                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1863                     debug!(
1864                         "print-type-size `{:#?}` variant {}",
1865                         layout,
1866                         adt_def.variant(index).name
1867                     );
1868                     let variant_def = &adt_def.variant(index);
1869                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1870                     record(
1871                         adt_kind.into(),
1872                         adt_packed,
1873                         None,
1874                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1875                     );
1876                 } else {
1877                     // (This case arises for *empty* enums; so give it
1878                     // zero variants.)
1879                     record(adt_kind.into(), adt_packed, None, vec![]);
1880                 }
1881             }
1882
1883             Variants::Multiple { tag, ref tag_encoding, .. } => {
1884                 debug!(
1885                     "print-type-size `{:#?}` adt general variants def {}",
1886                     layout.ty,
1887                     adt_def.variants().len()
1888                 );
1889                 let variant_infos: Vec<_> = adt_def
1890                     .variants()
1891                     .iter_enumerated()
1892                     .map(|(i, variant_def)| {
1893                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1894                         build_variant_info(
1895                             Some(variant_def.name),
1896                             &fields,
1897                             layout.for_variant(self, i),
1898                         )
1899                     })
1900                     .collect();
1901                 record(
1902                     adt_kind.into(),
1903                     adt_packed,
1904                     match tag_encoding {
1905                         TagEncoding::Direct => Some(tag.size(self)),
1906                         _ => None,
1907                     },
1908                     variant_infos,
1909                 );
1910             }
1911         }
1912     }
1913 }
1914
1915 /// Type size "skeleton", i.e., the only information determining a type's size.
1916 /// While this is conservative, (aside from constant sizes, only pointers,
1917 /// newtypes thereof and null pointer optimized enums are allowed), it is
1918 /// enough to statically check common use cases of transmute.
1919 #[derive(Copy, Clone, Debug)]
1920 pub enum SizeSkeleton<'tcx> {
1921     /// Any statically computable Layout.
1922     Known(Size),
1923
1924     /// A potentially-fat pointer.
1925     Pointer {
1926         /// If true, this pointer is never null.
1927         non_zero: bool,
1928         /// The type which determines the unsized metadata, if any,
1929         /// of this pointer. Either a type parameter or a projection
1930         /// depending on one, with regions erased.
1931         tail: Ty<'tcx>,
1932     },
1933 }
1934
1935 impl<'tcx> SizeSkeleton<'tcx> {
1936     pub fn compute(
1937         ty: Ty<'tcx>,
1938         tcx: TyCtxt<'tcx>,
1939         param_env: ty::ParamEnv<'tcx>,
1940     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1941         debug_assert!(!ty.has_infer_types_or_consts());
1942
1943         // First try computing a static layout.
1944         let err = match tcx.layout_of(param_env.and(ty)) {
1945             Ok(layout) => {
1946                 return Ok(SizeSkeleton::Known(layout.size));
1947             }
1948             Err(err) => err,
1949         };
1950
1951         match *ty.kind() {
1952             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1953                 let non_zero = !ty.is_unsafe_ptr();
1954                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1955                 match tail.kind() {
1956                     ty::Param(_) | ty::Projection(_) => {
1957                         debug_assert!(tail.has_param_types_or_consts());
1958                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1959                     }
1960                     _ => bug!(
1961                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1962                               tail `{}` is not a type parameter or a projection",
1963                         ty,
1964                         err,
1965                         tail
1966                     ),
1967                 }
1968             }
1969
1970             ty::Adt(def, substs) => {
1971                 // Only newtypes and enums w/ nullable pointer optimization.
1972                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1973                     return Err(err);
1974                 }
1975
1976                 // Get a zero-sized variant or a pointer newtype.
1977                 let zero_or_ptr_variant = |i| {
1978                     let i = VariantIdx::new(i);
1979                     let fields =
1980                         def.variant(i).fields.iter().map(|field| {
1981                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1982                         });
1983                     let mut ptr = None;
1984                     for field in fields {
1985                         let field = field?;
1986                         match field {
1987                             SizeSkeleton::Known(size) => {
1988                                 if size.bytes() > 0 {
1989                                     return Err(err);
1990                                 }
1991                             }
1992                             SizeSkeleton::Pointer { .. } => {
1993                                 if ptr.is_some() {
1994                                     return Err(err);
1995                                 }
1996                                 ptr = Some(field);
1997                             }
1998                         }
1999                     }
2000                     Ok(ptr)
2001                 };
2002
2003                 let v0 = zero_or_ptr_variant(0)?;
2004                 // Newtype.
2005                 if def.variants().len() == 1 {
2006                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2007                         return Ok(SizeSkeleton::Pointer {
2008                             non_zero: non_zero
2009                                 || match tcx.layout_scalar_valid_range(def.did()) {
2010                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2011                                     (Bound::Included(start), Bound::Included(end)) => {
2012                                         0 < start && start < end
2013                                     }
2014                                     _ => false,
2015                                 },
2016                             tail,
2017                         });
2018                     } else {
2019                         return Err(err);
2020                     }
2021                 }
2022
2023                 let v1 = zero_or_ptr_variant(1)?;
2024                 // Nullable pointer enum optimization.
2025                 match (v0, v1) {
2026                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2027                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2028                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2029                     }
2030                     _ => Err(err),
2031                 }
2032             }
2033
2034             ty::Projection(_) | ty::Opaque(..) => {
2035                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2036                 if ty == normalized {
2037                     Err(err)
2038                 } else {
2039                     SizeSkeleton::compute(normalized, tcx, param_env)
2040                 }
2041             }
2042
2043             _ => Err(err),
2044         }
2045     }
2046
2047     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2048         match (self, other) {
2049             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2050             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2051                 a == b
2052             }
2053             _ => false,
2054         }
2055     }
2056 }
2057
2058 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2059     fn tcx(&self) -> TyCtxt<'tcx>;
2060 }
2061
2062 pub trait HasParamEnv<'tcx> {
2063     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2064 }
2065
2066 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2067     #[inline]
2068     fn data_layout(&self) -> &TargetDataLayout {
2069         &self.data_layout
2070     }
2071 }
2072
2073 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2074     fn target_spec(&self) -> &Target {
2075         &self.sess.target
2076     }
2077 }
2078
2079 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2080     #[inline]
2081     fn tcx(&self) -> TyCtxt<'tcx> {
2082         *self
2083     }
2084 }
2085
2086 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2087     #[inline]
2088     fn data_layout(&self) -> &TargetDataLayout {
2089         &self.data_layout
2090     }
2091 }
2092
2093 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2094     fn target_spec(&self) -> &Target {
2095         &self.sess.target
2096     }
2097 }
2098
2099 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2100     #[inline]
2101     fn tcx(&self) -> TyCtxt<'tcx> {
2102         **self
2103     }
2104 }
2105
2106 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2107     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2108         self.param_env
2109     }
2110 }
2111
2112 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2113     fn data_layout(&self) -> &TargetDataLayout {
2114         self.tcx.data_layout()
2115     }
2116 }
2117
2118 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2119     fn target_spec(&self) -> &Target {
2120         self.tcx.target_spec()
2121     }
2122 }
2123
2124 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2125     fn tcx(&self) -> TyCtxt<'tcx> {
2126         self.tcx.tcx()
2127     }
2128 }
2129
2130 pub trait MaybeResult<T> {
2131     type Error;
2132
2133     fn from(x: Result<T, Self::Error>) -> Self;
2134     fn to_result(self) -> Result<T, Self::Error>;
2135 }
2136
2137 impl<T> MaybeResult<T> for T {
2138     type Error = !;
2139
2140     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2141         x
2142     }
2143     fn to_result(self) -> Result<T, Self::Error> {
2144         Ok(self)
2145     }
2146 }
2147
2148 impl<T, E> MaybeResult<T> for Result<T, E> {
2149     type Error = E;
2150
2151     fn from(x: Result<T, Self::Error>) -> Self {
2152         x
2153     }
2154     fn to_result(self) -> Result<T, Self::Error> {
2155         self
2156     }
2157 }
2158
2159 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2160
2161 /// Trait for contexts that want to be able to compute layouts of types.
2162 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2163 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2164     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2165     /// returned from `layout_of` (see also `handle_layout_err`).
2166     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2167
2168     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2169     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2170     #[inline]
2171     fn layout_tcx_at_span(&self) -> Span {
2172         DUMMY_SP
2173     }
2174
2175     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2176     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2177     ///
2178     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2179     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2180     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2181     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2182     fn handle_layout_err(
2183         &self,
2184         err: LayoutError<'tcx>,
2185         span: Span,
2186         ty: Ty<'tcx>,
2187     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2188 }
2189
2190 /// Blanket extension trait for contexts that can compute layouts of types.
2191 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2192     /// Computes the layout of a type. Note that this implicitly
2193     /// executes in "reveal all" mode, and will normalize the input type.
2194     #[inline]
2195     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2196         self.spanned_layout_of(ty, DUMMY_SP)
2197     }
2198
2199     /// Computes the layout of a type, at `span`. Note that this implicitly
2200     /// executes in "reveal all" mode, and will normalize the input type.
2201     // FIXME(eddyb) avoid passing information like this, and instead add more
2202     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2203     #[inline]
2204     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2205         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2206         let tcx = self.tcx().at(span);
2207
2208         MaybeResult::from(
2209             tcx.layout_of(self.param_env().and(ty))
2210                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2211         )
2212     }
2213 }
2214
2215 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2216
2217 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2218     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2219
2220     #[inline]
2221     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2222         err
2223     }
2224 }
2225
2226 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2227     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2228
2229     #[inline]
2230     fn layout_tcx_at_span(&self) -> Span {
2231         self.tcx.span
2232     }
2233
2234     #[inline]
2235     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2236         err
2237     }
2238 }
2239
2240 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2241 where
2242     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2243 {
2244     fn ty_and_layout_for_variant(
2245         this: TyAndLayout<'tcx>,
2246         cx: &C,
2247         variant_index: VariantIdx,
2248     ) -> TyAndLayout<'tcx> {
2249         let layout = match this.variants {
2250             Variants::Single { index }
2251                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2252                 if index == variant_index &&
2253                 // Don't confuse variants of uninhabited enums with the enum itself.
2254                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2255                 this.fields != FieldsShape::Primitive =>
2256             {
2257                 this.layout
2258             }
2259
2260             Variants::Single { index } => {
2261                 let tcx = cx.tcx();
2262                 let param_env = cx.param_env();
2263
2264                 // Deny calling for_variant more than once for non-Single enums.
2265                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2266                     assert_eq!(original_layout.variants, Variants::Single { index });
2267                 }
2268
2269                 let fields = match this.ty.kind() {
2270                     ty::Adt(def, _) if def.variants().is_empty() =>
2271                         bug!("for_variant called on zero-variant enum"),
2272                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2273                     _ => bug!(),
2274                 };
2275                 tcx.intern_layout(LayoutS {
2276                     variants: Variants::Single { index: variant_index },
2277                     fields: match NonZeroUsize::new(fields) {
2278                         Some(fields) => FieldsShape::Union(fields),
2279                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2280                     },
2281                     abi: Abi::Uninhabited,
2282                     largest_niche: None,
2283                     align: tcx.data_layout.i8_align,
2284                     size: Size::ZERO,
2285                 })
2286             }
2287
2288             Variants::Multiple { ref variants, .. } => variants[variant_index],
2289         };
2290
2291         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2292
2293         TyAndLayout { ty: this.ty, layout }
2294     }
2295
2296     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2297         enum TyMaybeWithLayout<'tcx> {
2298             Ty(Ty<'tcx>),
2299             TyAndLayout(TyAndLayout<'tcx>),
2300         }
2301
2302         fn field_ty_or_layout<'tcx>(
2303             this: TyAndLayout<'tcx>,
2304             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2305             i: usize,
2306         ) -> TyMaybeWithLayout<'tcx> {
2307             let tcx = cx.tcx();
2308             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2309                 TyAndLayout {
2310                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2311                     ty: tag.primitive().to_ty(tcx),
2312                 }
2313             };
2314
2315             match *this.ty.kind() {
2316                 ty::Bool
2317                 | ty::Char
2318                 | ty::Int(_)
2319                 | ty::Uint(_)
2320                 | ty::Float(_)
2321                 | ty::FnPtr(_)
2322                 | ty::Never
2323                 | ty::FnDef(..)
2324                 | ty::GeneratorWitness(..)
2325                 | ty::Foreign(..)
2326                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2327
2328                 // Potentially-fat pointers.
2329                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2330                     assert!(i < this.fields.count());
2331
2332                     // Reuse the fat `*T` type as its own thin pointer data field.
2333                     // This provides information about, e.g., DST struct pointees
2334                     // (which may have no non-DST form), and will work as long
2335                     // as the `Abi` or `FieldsShape` is checked by users.
2336                     if i == 0 {
2337                         let nil = tcx.mk_unit();
2338                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2339                             tcx.mk_mut_ptr(nil)
2340                         } else {
2341                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2342                         };
2343
2344                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2345                         // the `Result` should always work because the type is
2346                         // always either `*mut ()` or `&'static mut ()`.
2347                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2348                             ty: this.ty,
2349                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2350                         });
2351                     }
2352
2353                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2354                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2355                         ty::Dynamic(_, _) => {
2356                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2357                                 tcx.lifetimes.re_static,
2358                                 tcx.mk_array(tcx.types.usize, 3),
2359                             ))
2360                             /* FIXME: use actual fn pointers
2361                             Warning: naively computing the number of entries in the
2362                             vtable by counting the methods on the trait + methods on
2363                             all parent traits does not work, because some methods can
2364                             be not object safe and thus excluded from the vtable.
2365                             Increase this counter if you tried to implement this but
2366                             failed to do it without duplicating a lot of code from
2367                             other places in the compiler: 2
2368                             tcx.mk_tup(&[
2369                                 tcx.mk_array(tcx.types.usize, 3),
2370                                 tcx.mk_array(Option<fn()>),
2371                             ])
2372                             */
2373                         }
2374                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2375                     }
2376                 }
2377
2378                 // Arrays and slices.
2379                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2380                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2381
2382                 // Tuples, generators and closures.
2383                 ty::Closure(_, ref substs) => field_ty_or_layout(
2384                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2385                     cx,
2386                     i,
2387                 ),
2388
2389                 ty::Generator(def_id, ref substs, _) => match this.variants {
2390                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2391                         substs
2392                             .as_generator()
2393                             .state_tys(def_id, tcx)
2394                             .nth(index.as_usize())
2395                             .unwrap()
2396                             .nth(i)
2397                             .unwrap(),
2398                     ),
2399                     Variants::Multiple { tag, tag_field, .. } => {
2400                         if i == tag_field {
2401                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2402                         }
2403                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2404                     }
2405                 },
2406
2407                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2408
2409                 // ADTs.
2410                 ty::Adt(def, substs) => {
2411                     match this.variants {
2412                         Variants::Single { index } => {
2413                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2414                         }
2415
2416                         // Discriminant field for enums (where applicable).
2417                         Variants::Multiple { tag, .. } => {
2418                             assert_eq!(i, 0);
2419                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2420                         }
2421                     }
2422                 }
2423
2424                 ty::Projection(_)
2425                 | ty::Bound(..)
2426                 | ty::Placeholder(..)
2427                 | ty::Opaque(..)
2428                 | ty::Param(_)
2429                 | ty::Infer(_)
2430                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2431             }
2432         }
2433
2434         match field_ty_or_layout(this, cx, i) {
2435             TyMaybeWithLayout::Ty(field_ty) => {
2436                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2437                     bug!(
2438                         "failed to get layout for `{}`: {},\n\
2439                          despite it being a field (#{}) of an existing layout: {:#?}",
2440                         field_ty,
2441                         e,
2442                         i,
2443                         this
2444                     )
2445                 })
2446             }
2447             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2448         }
2449     }
2450
2451     fn ty_and_layout_pointee_info_at(
2452         this: TyAndLayout<'tcx>,
2453         cx: &C,
2454         offset: Size,
2455     ) -> Option<PointeeInfo> {
2456         let tcx = cx.tcx();
2457         let param_env = cx.param_env();
2458
2459         let addr_space_of_ty = |ty: Ty<'tcx>| {
2460             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2461         };
2462
2463         let pointee_info = match *this.ty.kind() {
2464             ty::RawPtr(mt) if offset.bytes() == 0 => {
2465                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2466                     size: layout.size,
2467                     align: layout.align.abi,
2468                     safe: None,
2469                     address_space: addr_space_of_ty(mt.ty),
2470                 })
2471             }
2472             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2473                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2474                     size: layout.size,
2475                     align: layout.align.abi,
2476                     safe: None,
2477                     address_space: cx.data_layout().instruction_address_space,
2478                 })
2479             }
2480             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2481                 let address_space = addr_space_of_ty(ty);
2482                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2483                     // Use conservative pointer kind if not optimizing. This saves us the
2484                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2485                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2486                     PointerKind::Shared
2487                 } else {
2488                     match mt {
2489                         hir::Mutability::Not => {
2490                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2491                                 PointerKind::Frozen
2492                             } else {
2493                                 PointerKind::Shared
2494                             }
2495                         }
2496                         hir::Mutability::Mut => {
2497                             // References to self-referential structures should not be considered
2498                             // noalias, as another pointer to the structure can be obtained, that
2499                             // is not based-on the original reference. We consider all !Unpin
2500                             // types to be potentially self-referential here.
2501                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2502                                 PointerKind::UniqueBorrowed
2503                             } else {
2504                                 PointerKind::Shared
2505                             }
2506                         }
2507                     }
2508                 };
2509
2510                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2511                     size: layout.size,
2512                     align: layout.align.abi,
2513                     safe: Some(kind),
2514                     address_space,
2515                 })
2516             }
2517
2518             _ => {
2519                 let mut data_variant = match this.variants {
2520                     // Within the discriminant field, only the niche itself is
2521                     // always initialized, so we only check for a pointer at its
2522                     // offset.
2523                     //
2524                     // If the niche is a pointer, it's either valid (according
2525                     // to its type), or null (which the niche field's scalar
2526                     // validity range encodes).  This allows using
2527                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2528                     // this will continue to work as long as we don't start
2529                     // using more niches than just null (e.g., the first page of
2530                     // the address space, or unaligned pointers).
2531                     Variants::Multiple {
2532                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2533                         tag_field,
2534                         ..
2535                     } if this.fields.offset(tag_field) == offset => {
2536                         Some(this.for_variant(cx, dataful_variant))
2537                     }
2538                     _ => Some(this),
2539                 };
2540
2541                 if let Some(variant) = data_variant {
2542                     // We're not interested in any unions.
2543                     if let FieldsShape::Union(_) = variant.fields {
2544                         data_variant = None;
2545                     }
2546                 }
2547
2548                 let mut result = None;
2549
2550                 if let Some(variant) = data_variant {
2551                     let ptr_end = offset + Pointer.size(cx);
2552                     for i in 0..variant.fields.count() {
2553                         let field_start = variant.fields.offset(i);
2554                         if field_start <= offset {
2555                             let field = variant.field(cx, i);
2556                             result = field.to_result().ok().and_then(|field| {
2557                                 if ptr_end <= field_start + field.size {
2558                                     // We found the right field, look inside it.
2559                                     let field_info =
2560                                         field.pointee_info_at(cx, offset - field_start);
2561                                     field_info
2562                                 } else {
2563                                     None
2564                                 }
2565                             });
2566                             if result.is_some() {
2567                                 break;
2568                             }
2569                         }
2570                     }
2571                 }
2572
2573                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2574                 if let Some(ref mut pointee) = result {
2575                     if let ty::Adt(def, _) = this.ty.kind() {
2576                         if def.is_box() && offset.bytes() == 0 {
2577                             pointee.safe = Some(PointerKind::UniqueOwned);
2578                         }
2579                     }
2580                 }
2581
2582                 result
2583             }
2584         };
2585
2586         debug!(
2587             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2588             offset,
2589             this.ty.kind(),
2590             pointee_info
2591         );
2592
2593         pointee_info
2594     }
2595
2596     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2597         matches!(this.ty.kind(), ty::Adt(..))
2598     }
2599
2600     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2601         this.ty.kind() == &ty::Never
2602     }
2603
2604     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2605         matches!(this.ty.kind(), ty::Tuple(..))
2606     }
2607
2608     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2609         matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2610     }
2611 }
2612
2613 impl<'tcx> ty::Instance<'tcx> {
2614     // NOTE(eddyb) this is private to avoid using it from outside of
2615     // `fn_abi_of_instance` - any other uses are either too high-level
2616     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2617     // or should go through `FnAbi` instead, to avoid losing any
2618     // adjustments `fn_abi_of_instance` might be performing.
2619     fn fn_sig_for_fn_abi(
2620         &self,
2621         tcx: TyCtxt<'tcx>,
2622         param_env: ty::ParamEnv<'tcx>,
2623     ) -> ty::PolyFnSig<'tcx> {
2624         let ty = self.ty(tcx, param_env);
2625         match *ty.kind() {
2626             ty::FnDef(..) => {
2627                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2628                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2629                 // (i.e. due to being inside a projection that got normalized, see
2630                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2631                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2632                 let mut sig = match *ty.kind() {
2633                     ty::FnDef(def_id, substs) => tcx
2634                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2635                         .subst(tcx, substs),
2636                     _ => unreachable!(),
2637                 };
2638
2639                 if let ty::InstanceDef::VtableShim(..) = self.def {
2640                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2641                     sig = sig.map_bound(|mut sig| {
2642                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2643                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2644                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2645                         sig
2646                     });
2647                 }
2648                 sig
2649             }
2650             ty::Closure(def_id, substs) => {
2651                 let sig = substs.as_closure().sig();
2652
2653                 let bound_vars = tcx.mk_bound_variable_kinds(
2654                     sig.bound_vars()
2655                         .iter()
2656                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2657                 );
2658                 let br = ty::BoundRegion {
2659                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2660                     kind: ty::BoundRegionKind::BrEnv,
2661                 };
2662                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2663                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2664
2665                 let sig = sig.skip_binder();
2666                 ty::Binder::bind_with_vars(
2667                     tcx.mk_fn_sig(
2668                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2669                         sig.output(),
2670                         sig.c_variadic,
2671                         sig.unsafety,
2672                         sig.abi,
2673                     ),
2674                     bound_vars,
2675                 )
2676             }
2677             ty::Generator(_, substs, _) => {
2678                 let sig = substs.as_generator().poly_sig();
2679
2680                 let bound_vars = tcx.mk_bound_variable_kinds(
2681                     sig.bound_vars()
2682                         .iter()
2683                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2684                 );
2685                 let br = ty::BoundRegion {
2686                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2687                     kind: ty::BoundRegionKind::BrEnv,
2688                 };
2689                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2690                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2691
2692                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2693                 let pin_adt_ref = tcx.adt_def(pin_did);
2694                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2695                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2696
2697                 let sig = sig.skip_binder();
2698                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2699                 let state_adt_ref = tcx.adt_def(state_did);
2700                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2701                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2702                 ty::Binder::bind_with_vars(
2703                     tcx.mk_fn_sig(
2704                         [env_ty, sig.resume_ty].iter(),
2705                         &ret_ty,
2706                         false,
2707                         hir::Unsafety::Normal,
2708                         rustc_target::spec::abi::Abi::Rust,
2709                     ),
2710                     bound_vars,
2711                 )
2712             }
2713             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2714         }
2715     }
2716 }
2717
2718 /// Calculates whether a function's ABI can unwind or not.
2719 ///
2720 /// This takes two primary parameters:
2721 ///
2722 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2723 ///   codegen attrs for a defined function. For function pointers this set of
2724 ///   flags is the empty set. This is only applicable for Rust-defined
2725 ///   functions, and generally isn't needed except for small optimizations where
2726 ///   we try to say a function which otherwise might look like it could unwind
2727 ///   doesn't actually unwind (such as for intrinsics and such).
2728 ///
2729 /// * `abi` - this is the ABI that the function is defined with. This is the
2730 ///   primary factor for determining whether a function can unwind or not.
2731 ///
2732 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2733 /// panics are implemented with unwinds on most platform (when
2734 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2735 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2736 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2737 /// defined for each ABI individually, but it always corresponds to some form of
2738 /// stack-based unwinding (the exact mechanism of which varies
2739 /// platform-by-platform).
2740 ///
2741 /// Rust functions are classified whether or not they can unwind based on the
2742 /// active "panic strategy". In other words Rust functions are considered to
2743 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2744 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2745 /// only if the final panic mode is panic=abort. In this scenario any code
2746 /// previously compiled assuming that a function can unwind is still correct, it
2747 /// just never happens to actually unwind at runtime.
2748 ///
2749 /// This function's answer to whether or not a function can unwind is quite
2750 /// impactful throughout the compiler. This affects things like:
2751 ///
2752 /// * Calling a function which can't unwind means codegen simply ignores any
2753 ///   associated unwinding cleanup.
2754 /// * Calling a function which can unwind from a function which can't unwind
2755 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2756 ///   aborts the process.
2757 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2758 ///   affects various optimizations and codegen.
2759 ///
2760 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2761 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2762 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2763 /// might (from a foreign exception or similar).
2764 #[inline]
2765 pub fn fn_can_unwind<'tcx>(
2766     tcx: TyCtxt<'tcx>,
2767     codegen_fn_attr_flags: CodegenFnAttrFlags,
2768     abi: SpecAbi,
2769 ) -> bool {
2770     // Special attribute for functions which can't unwind.
2771     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2772         return false;
2773     }
2774
2775     // Otherwise if this isn't special then unwinding is generally determined by
2776     // the ABI of the itself. ABIs like `C` have variants which also
2777     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2778     // ABIs have such an option. Otherwise the only other thing here is Rust
2779     // itself, and those ABIs are determined by the panic strategy configured
2780     // for this compilation.
2781     //
2782     // Unfortunately at this time there's also another caveat. Rust [RFC
2783     // 2945][rfc] has been accepted and is in the process of being implemented
2784     // and stabilized. In this interim state we need to deal with historical
2785     // rustc behavior as well as plan for future rustc behavior.
2786     //
2787     // Historically functions declared with `extern "C"` were marked at the
2788     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2789     // or not. This is UB for functions in `panic=unwind` mode that then
2790     // actually panic and unwind. Note that this behavior is true for both
2791     // externally declared functions as well as Rust-defined function.
2792     //
2793     // To fix this UB rustc would like to change in the future to catch unwinds
2794     // from function calls that may unwind within a Rust-defined `extern "C"`
2795     // function and forcibly abort the process, thereby respecting the
2796     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2797     // ready to roll out, so determining whether or not the `C` family of ABIs
2798     // unwinds is conditional not only on their definition but also whether the
2799     // `#![feature(c_unwind)]` feature gate is active.
2800     //
2801     // Note that this means that unlike historical compilers rustc now, by
2802     // default, unconditionally thinks that the `C` ABI may unwind. This will
2803     // prevent some optimization opportunities, however, so we try to scope this
2804     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2805     // to `panic=abort`).
2806     //
2807     // Eventually the check against `c_unwind` here will ideally get removed and
2808     // this'll be a little cleaner as it'll be a straightforward check of the
2809     // ABI.
2810     //
2811     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2812     use SpecAbi::*;
2813     match abi {
2814         C { unwind }
2815         | System { unwind }
2816         | Cdecl { unwind }
2817         | Stdcall { unwind }
2818         | Fastcall { unwind }
2819         | Vectorcall { unwind }
2820         | Thiscall { unwind }
2821         | Aapcs { unwind }
2822         | Win64 { unwind }
2823         | SysV64 { unwind } => {
2824             unwind
2825                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2826         }
2827         PtxKernel
2828         | Msp430Interrupt
2829         | X86Interrupt
2830         | AmdGpuKernel
2831         | EfiApi
2832         | AvrInterrupt
2833         | AvrNonBlockingInterrupt
2834         | CCmseNonSecureCall
2835         | Wasm
2836         | RustIntrinsic
2837         | PlatformIntrinsic
2838         | Unadjusted => false,
2839         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2840     }
2841 }
2842
2843 #[inline]
2844 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2845     use rustc_target::spec::abi::Abi::*;
2846     match tcx.sess.target.adjust_abi(abi) {
2847         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2848
2849         // It's the ABI's job to select this, not ours.
2850         System { .. } => bug!("system abi should be selected elsewhere"),
2851         EfiApi => bug!("eficall abi should be selected elsewhere"),
2852
2853         Stdcall { .. } => Conv::X86Stdcall,
2854         Fastcall { .. } => Conv::X86Fastcall,
2855         Vectorcall { .. } => Conv::X86VectorCall,
2856         Thiscall { .. } => Conv::X86ThisCall,
2857         C { .. } => Conv::C,
2858         Unadjusted => Conv::C,
2859         Win64 { .. } => Conv::X86_64Win64,
2860         SysV64 { .. } => Conv::X86_64SysV,
2861         Aapcs { .. } => Conv::ArmAapcs,
2862         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2863         PtxKernel => Conv::PtxKernel,
2864         Msp430Interrupt => Conv::Msp430Intr,
2865         X86Interrupt => Conv::X86Intr,
2866         AmdGpuKernel => Conv::AmdGpuKernel,
2867         AvrInterrupt => Conv::AvrInterrupt,
2868         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2869         Wasm => Conv::C,
2870
2871         // These API constants ought to be more specific...
2872         Cdecl { .. } => Conv::C,
2873     }
2874 }
2875
2876 /// Error produced by attempting to compute or adjust a `FnAbi`.
2877 #[derive(Copy, Clone, Debug, HashStable)]
2878 pub enum FnAbiError<'tcx> {
2879     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2880     Layout(LayoutError<'tcx>),
2881
2882     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2883     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2884 }
2885
2886 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2887     fn from(err: LayoutError<'tcx>) -> Self {
2888         Self::Layout(err)
2889     }
2890 }
2891
2892 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2893     fn from(err: call::AdjustForForeignAbiError) -> Self {
2894         Self::AdjustForForeignAbi(err)
2895     }
2896 }
2897
2898 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2899     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2900         match self {
2901             Self::Layout(err) => err.fmt(f),
2902             Self::AdjustForForeignAbi(err) => err.fmt(f),
2903         }
2904     }
2905 }
2906
2907 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
2908 // just for error handling.
2909 #[derive(Debug)]
2910 pub enum FnAbiRequest<'tcx> {
2911     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2912     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2913 }
2914
2915 /// Trait for contexts that want to be able to compute `FnAbi`s.
2916 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2917 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2918     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2919     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2920     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2921
2922     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2923     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2924     ///
2925     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2926     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2927     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2928     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2929     fn handle_fn_abi_err(
2930         &self,
2931         err: FnAbiError<'tcx>,
2932         span: Span,
2933         fn_abi_request: FnAbiRequest<'tcx>,
2934     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2935 }
2936
2937 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2938 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2939     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2940     ///
2941     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2942     /// instead, where the instance is an `InstanceDef::Virtual`.
2943     #[inline]
2944     fn fn_abi_of_fn_ptr(
2945         &self,
2946         sig: ty::PolyFnSig<'tcx>,
2947         extra_args: &'tcx ty::List<Ty<'tcx>>,
2948     ) -> Self::FnAbiOfResult {
2949         // FIXME(eddyb) get a better `span` here.
2950         let span = self.layout_tcx_at_span();
2951         let tcx = self.tcx().at(span);
2952
2953         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2954             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2955         ))
2956     }
2957
2958     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2959     /// direct calls to an `fn`.
2960     ///
2961     /// NB: that includes virtual calls, which are represented by "direct calls"
2962     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2963     #[inline]
2964     fn fn_abi_of_instance(
2965         &self,
2966         instance: ty::Instance<'tcx>,
2967         extra_args: &'tcx ty::List<Ty<'tcx>>,
2968     ) -> Self::FnAbiOfResult {
2969         // FIXME(eddyb) get a better `span` here.
2970         let span = self.layout_tcx_at_span();
2971         let tcx = self.tcx().at(span);
2972
2973         MaybeResult::from(
2974             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2975                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2976                 // we can get some kind of span even if one wasn't provided.
2977                 // However, we don't do this early in order to avoid calling
2978                 // `def_span` unconditionally (which may have a perf penalty).
2979                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2980                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2981             }),
2982         )
2983     }
2984 }
2985
2986 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
2987
2988 fn fn_abi_of_fn_ptr<'tcx>(
2989     tcx: TyCtxt<'tcx>,
2990     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2991 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2992     let (param_env, (sig, extra_args)) = query.into_parts();
2993
2994     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2995         sig,
2996         extra_args,
2997         None,
2998         CodegenFnAttrFlags::empty(),
2999         false,
3000     )
3001 }
3002
3003 fn fn_abi_of_instance<'tcx>(
3004     tcx: TyCtxt<'tcx>,
3005     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3006 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3007     let (param_env, (instance, extra_args)) = query.into_parts();
3008
3009     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3010
3011     let caller_location = if instance.def.requires_caller_location(tcx) {
3012         Some(tcx.caller_location_ty())
3013     } else {
3014         None
3015     };
3016
3017     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
3018
3019     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3020         sig,
3021         extra_args,
3022         caller_location,
3023         attrs,
3024         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3025     )
3026 }
3027
3028 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3029     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3030     // arguments of this method, into a separate `struct`.
3031     fn fn_abi_new_uncached(
3032         &self,
3033         sig: ty::PolyFnSig<'tcx>,
3034         extra_args: &[Ty<'tcx>],
3035         caller_location: Option<Ty<'tcx>>,
3036         codegen_fn_attr_flags: CodegenFnAttrFlags,
3037         // FIXME(eddyb) replace this with something typed, like an `enum`.
3038         force_thin_self_ptr: bool,
3039     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3040         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3041
3042         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3043
3044         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3045
3046         let mut inputs = sig.inputs();
3047         let extra_args = if sig.abi == RustCall {
3048             assert!(!sig.c_variadic && extra_args.is_empty());
3049
3050             if let Some(input) = sig.inputs().last() {
3051                 if let ty::Tuple(tupled_arguments) = input.kind() {
3052                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3053                     tupled_arguments
3054                 } else {
3055                     bug!(
3056                         "argument to function with \"rust-call\" ABI \
3057                             is not a tuple"
3058                     );
3059                 }
3060             } else {
3061                 bug!(
3062                     "argument to function with \"rust-call\" ABI \
3063                         is not a tuple"
3064                 );
3065             }
3066         } else {
3067             assert!(sig.c_variadic || extra_args.is_empty());
3068             extra_args
3069         };
3070
3071         let target = &self.tcx.sess.target;
3072         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3073         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3074         let linux_s390x_gnu_like =
3075             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3076         let linux_sparc64_gnu_like =
3077             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3078         let linux_powerpc_gnu_like =
3079             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3080         use SpecAbi::*;
3081         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3082
3083         // Handle safe Rust thin and fat pointers.
3084         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3085                                       scalar: Scalar,
3086                                       layout: TyAndLayout<'tcx>,
3087                                       offset: Size,
3088                                       is_return: bool| {
3089             // Booleans are always a noundef i1 that needs to be zero-extended.
3090             if scalar.is_bool() {
3091                 attrs.ext(ArgExtension::Zext);
3092                 attrs.set(ArgAttribute::NoUndef);
3093                 return;
3094             }
3095
3096             // Scalars which have invalid values cannot be undef.
3097             if !scalar.is_always_valid(self) {
3098                 attrs.set(ArgAttribute::NoUndef);
3099             }
3100
3101             // Only pointer types handled below.
3102             let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3103
3104             if !valid_range.contains(0) {
3105                 attrs.set(ArgAttribute::NonNull);
3106             }
3107
3108             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3109                 if let Some(kind) = pointee.safe {
3110                     attrs.pointee_align = Some(pointee.align);
3111
3112                     // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3113                     // for the entire duration of the function as they can be deallocated
3114                     // at any time. Set their valid size to 0.
3115                     attrs.pointee_size = match kind {
3116                         PointerKind::UniqueOwned => Size::ZERO,
3117                         _ => pointee.size,
3118                     };
3119
3120                     // `Box`, `&T`, and `&mut T` cannot be undef.
3121                     // Note that this only applies to the value of the pointer itself;
3122                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3123                     attrs.set(ArgAttribute::NoUndef);
3124
3125                     // `Box` pointer parameters never alias because ownership is transferred
3126                     // `&mut` pointer parameters never alias other parameters,
3127                     // or mutable global data
3128                     //
3129                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3130                     // and can be marked as both `readonly` and `noalias`, as
3131                     // LLVM's definition of `noalias` is based solely on memory
3132                     // dependencies rather than pointer equality
3133                     //
3134                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3135                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3136                     // or not to actually emit the attribute. It can also be controlled with the
3137                     // `-Zmutable-noalias` debugging option.
3138                     let no_alias = match kind {
3139                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3140                         PointerKind::UniqueOwned => true,
3141                         PointerKind::Frozen => !is_return,
3142                     };
3143                     if no_alias {
3144                         attrs.set(ArgAttribute::NoAlias);
3145                     }
3146
3147                     if kind == PointerKind::Frozen && !is_return {
3148                         attrs.set(ArgAttribute::ReadOnly);
3149                     }
3150
3151                     if kind == PointerKind::UniqueBorrowed && !is_return {
3152                         attrs.set(ArgAttribute::NoAliasMutRef);
3153                     }
3154                 }
3155             }
3156         };
3157
3158         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3159             let is_return = arg_idx.is_none();
3160
3161             let layout = self.layout_of(ty)?;
3162             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3163                 // Don't pass the vtable, it's not an argument of the virtual fn.
3164                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3165                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3166                 make_thin_self_ptr(self, layout)
3167             } else {
3168                 layout
3169             };
3170
3171             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3172                 let mut attrs = ArgAttributes::new();
3173                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3174                 attrs
3175             });
3176
3177             if arg.layout.is_zst() {
3178                 // For some forsaken reason, x86_64-pc-windows-gnu
3179                 // doesn't ignore zero-sized struct arguments.
3180                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3181                 if is_return
3182                     || rust_abi
3183                     || (!win_x64_gnu
3184                         && !linux_s390x_gnu_like
3185                         && !linux_sparc64_gnu_like
3186                         && !linux_powerpc_gnu_like)
3187                 {
3188                     arg.mode = PassMode::Ignore;
3189                 }
3190             }
3191
3192             Ok(arg)
3193         };
3194
3195         let mut fn_abi = FnAbi {
3196             ret: arg_of(sig.output(), None)?,
3197             args: inputs
3198                 .iter()
3199                 .copied()
3200                 .chain(extra_args.iter().copied())
3201                 .chain(caller_location)
3202                 .enumerate()
3203                 .map(|(i, ty)| arg_of(ty, Some(i)))
3204                 .collect::<Result<_, _>>()?,
3205             c_variadic: sig.c_variadic,
3206             fixed_count: inputs.len(),
3207             conv,
3208             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3209         };
3210         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3211         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3212         Ok(self.tcx.arena.alloc(fn_abi))
3213     }
3214
3215     fn fn_abi_adjust_for_abi(
3216         &self,
3217         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3218         abi: SpecAbi,
3219     ) -> Result<(), FnAbiError<'tcx>> {
3220         if abi == SpecAbi::Unadjusted {
3221             return Ok(());
3222         }
3223
3224         if abi == SpecAbi::Rust
3225             || abi == SpecAbi::RustCall
3226             || abi == SpecAbi::RustIntrinsic
3227             || abi == SpecAbi::PlatformIntrinsic
3228         {
3229             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3230                 if arg.is_ignore() {
3231                     return;
3232                 }
3233
3234                 match arg.layout.abi {
3235                     Abi::Aggregate { .. } => {}
3236
3237                     // This is a fun case! The gist of what this is doing is
3238                     // that we want callers and callees to always agree on the
3239                     // ABI of how they pass SIMD arguments. If we were to *not*
3240                     // make these arguments indirect then they'd be immediates
3241                     // in LLVM, which means that they'd used whatever the
3242                     // appropriate ABI is for the callee and the caller. That
3243                     // means, for example, if the caller doesn't have AVX
3244                     // enabled but the callee does, then passing an AVX argument
3245                     // across this boundary would cause corrupt data to show up.
3246                     //
3247                     // This problem is fixed by unconditionally passing SIMD
3248                     // arguments through memory between callers and callees
3249                     // which should get them all to agree on ABI regardless of
3250                     // target feature sets. Some more information about this
3251                     // issue can be found in #44367.
3252                     //
3253                     // Note that the platform intrinsic ABI is exempt here as
3254                     // that's how we connect up to LLVM and it's unstable
3255                     // anyway, we control all calls to it in libstd.
3256                     Abi::Vector { .. }
3257                         if abi != SpecAbi::PlatformIntrinsic
3258                             && self.tcx.sess.target.simd_types_indirect =>
3259                     {
3260                         arg.make_indirect();
3261                         return;
3262                     }
3263
3264                     _ => return,
3265                 }
3266
3267                 let size = arg.layout.size;
3268                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3269                     arg.make_indirect();
3270                 } else {
3271                     // We want to pass small aggregates as immediates, but using
3272                     // a LLVM aggregate type for this leads to bad optimizations,
3273                     // so we pick an appropriately sized integer type instead.
3274                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3275                 }
3276             };
3277             fixup(&mut fn_abi.ret);
3278             for arg in &mut fn_abi.args {
3279                 fixup(arg);
3280             }
3281         } else {
3282             fn_abi.adjust_for_foreign_abi(self, abi)?;
3283         }
3284
3285         Ok(())
3286     }
3287 }
3288
3289 fn make_thin_self_ptr<'tcx>(
3290     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3291     layout: TyAndLayout<'tcx>,
3292 ) -> TyAndLayout<'tcx> {
3293     let tcx = cx.tcx();
3294     let fat_pointer_ty = if layout.is_unsized() {
3295         // unsized `self` is passed as a pointer to `self`
3296         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3297         tcx.mk_mut_ptr(layout.ty)
3298     } else {
3299         match layout.abi {
3300             Abi::ScalarPair(..) => (),
3301             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3302         }
3303
3304         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3305         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3306         // elsewhere in the compiler as a method on a `dyn Trait`.
3307         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3308         // get a built-in pointer type
3309         let mut fat_pointer_layout = layout;
3310         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3311             && !fat_pointer_layout.ty.is_region_ptr()
3312         {
3313             for i in 0..fat_pointer_layout.fields.count() {
3314                 let field_layout = fat_pointer_layout.field(cx, i);
3315
3316                 if !field_layout.is_zst() {
3317                     fat_pointer_layout = field_layout;
3318                     continue 'descend_newtypes;
3319                 }
3320             }
3321
3322             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3323         }
3324
3325         fat_pointer_layout.ty
3326     };
3327
3328     // we now have a type like `*mut RcBox<dyn Trait>`
3329     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3330     // this is understood as a special case elsewhere in the compiler
3331     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3332
3333     TyAndLayout {
3334         ty: fat_pointer_ty,
3335
3336         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3337         // should always work because the type is always `*mut ()`.
3338         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3339     }
3340 }