// compiler/rustc_middle/src/ty/layout.rs
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::{
    self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
    TyCtxt, TypeVisitable,
};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp::{self, Ordering};
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
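
// Worked examples for `repr_discr` (illustrative only; they follow from the
// `fit_unsigned`/`fit_signed` logic above):
// * range 0..=200 with no `#[repr]` hint: 200 fits in a `u8`, so the result
//   is `(I8, false)`.
// * range -1..=100 with no `#[repr]` hint: a signed type is needed and both
//   bounds fit in an `i8`, so the result is `(I8, true)`; with `#[repr(C)]`,
//   `at_least` is usually `I32`, giving `(I32, true)`.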

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer, so
///   the largest representable lane count is `1 << 15` (i.e., 32768).
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'a> IntoDiagnostic<'a, !> for LayoutError<'a> {
    fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, !> {
        handler.struct_fatal(self.to_string())
    }
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = cx.layout_of_uncached(ty)?;
    let layout = TyAndLayout { ty, layout };

    cx.record_layout_for_printing(layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
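
// Illustrative usage note: `layout_of` is registered as a query provider in
// `provide` above, so callers typically reach it through the query system,
// e.g. `tcx.layout_of(param_env.and(ty))`, rather than calling it directly.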

#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
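
// For example (illustrative): if `map == [2, 0, 1]`, i.e. source field 0 lands
// at memory position 2, then `invert_mapping(&map)` returns `[1, 2, 0]`.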

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
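
    // For example (illustrative, assuming a 64-bit data layout): for `a = i32`
    // and `b = i64`, `b_offset` is 4 rounded up to `b`'s alignment of 8, so the
    // pair has field offsets `[0, 8]`, size 16, and alignment 8.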

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
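
        // For example (illustrative): with fields `(u8, u32, u16)` and no repr
        // attributes, sorting by decreasing alignment gives the memory order
        // `u32, u16, u8`, i.e. `inverse_memory_index == [1, 2, 0]` and, after
        // the inversion below, `memory_index == [2, 0, 1]`.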

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        i,
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
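                // For example (illustrative): `&[u8]` becomes a (data pointer,
                // length) pair and `&dyn Trait` a (data pointer, vtable pointer) pair.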
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            ty::Dynamic(_, _, ty::DynStar) => {
                let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
                data.valid_range_mut().start = 0;
                let mut vtable = scalar_unit(Pointer);
                vtable.valid_range_mut().start = 1;
                tcx.intern_layout(self.scalar_pair(data, vtable))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a
                // fixed maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };
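
                // For example (illustrative): `#[repr(simd)] struct F32x4([f32; 4])`
                // produces `Abi::Vector { element: f32, count: 4 }` with size 16 and
                // the target's preferred vector alignment for that size.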

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }
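
                    // For example (illustrative): a union of two `u32` fields keeps
                    // `Abi::Scalar`, while a union of `u32` and `f32` falls back to
                    // `Abi::Aggregate` because the field ABIs differ.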

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
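                // For example (illustrative): in `enum E { A(!), B(u32) }` the
                // variant `A` is absent (uninhabited with only ZST fields), so `E`
                // is laid out below like a univariant struct around `B`.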
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };

                    if def.is_unsafe_cell() {
                        let hide_niches = |scalar: &mut _| match scalar {
                            Scalar::Initialized { value, valid_range } => {
                                *valid_range = WrappingRange::full(value.size(dl))
                            }
                            // Already doesn't have any niches
                            Scalar::Union { .. } => {}
                        };
                        match &mut st.abi {
                            Abi::Uninhabited => {}
                            Abi::Scalar(scalar) => hide_niches(scalar),
                            Abi::ScalarPair(a, b) => {
                                hide_niches(a);
                                hide_niches(b);
                            }
                            Abi::Vector { element, count: _ } => hide_niches(element),
                            Abi::Aggregate { sized: _ } => {}
                        }
                        st.largest_niche = None;
                        return Ok(tcx.intern_layout(st));
                    }

                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // Until we've decided whether to use the tagged or
                // niche filling LayoutS, we don't want to intern the
                // variant layouts, so we can't store them in the
                // overall LayoutS. Store the overall LayoutS
                // and the variant LayoutSs here until then.
                struct TmpLayout<'tcx> {
                    layout: LayoutS<'tcx>,
                    variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
                }
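
                // For example (illustrative): `Option<&u8>` takes the niche-filling
                // path computed below: `None` is encoded in the pointer's null value,
                // so the enum is pointer-sized with no separate tag field.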

                let calculate_niche_filling_layout =
                    || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
                        // The current code for niche-filling relies on variant indices
                        // instead of actual discriminants, so enums with
                        // explicit discriminants (RFC #2363) would misbehave.
                        if def.repr().inhibit_enum_layout_opt()
                            || def
                                .variants()
                                .iter_enumerated()
                                .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
                        {
                            return Ok(None);
                        }

                        if variants.len() < 2 {
                            return Ok(None);
                        }

                        let mut align = dl.aggregate_align;
                        let mut variant_layouts = variants
                            .iter_enumerated()
                            .map(|(j, v)| {
                                let mut st = self.univariant_uninterned(
                                    ty,
                                    v,
                                    &def.repr(),
                                    StructKind::AlwaysSized,
                                )?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            })
                            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                        let largest_variant_index = match variant_layouts
                            .iter_enumerated()
                            .max_by_key(|(_i, layout)| layout.size.bytes())
                            .map(|(i, _layout)| i)
                        {
                            None => return Ok(None),
                            Some(i) => i,
                        };

                        let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
                        let needs_disc = |index: VariantIdx| {
                            index != largest_variant_index && !absent(&variants[index])
                        };
                        let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                            ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

                        let count = niche_variants.size_hint().1.unwrap() as u128;

                        // Find the field with the largest niche
                        let (field_index, niche, (niche_start, niche_scalar)) = match variants
                            [largest_variant_index]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl))
                            .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
                        {
                            None => return Ok(None),
                            Some(x) => x,
                        };

                        let niche_offset = niche.offset
                            + variant_layouts[largest_variant_index].fields.offset(field_index);
                        let niche_size = niche.value.size(dl);
                        let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

                        let all_variants_fit =
                            variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                                if i == largest_variant_index {
                                    return true;
                                }

                                layout.largest_niche = None;

                                if layout.size <= niche_offset {
                                    // This variant will fit before the niche.
                                    return true;
                                }

                                // Determine if it'll fit after the niche.
                                let this_align = layout.align.abi;
                                let this_offset = (niche_offset + niche_size).align_to(this_align);

                                if this_offset + layout.size > size {
                                    return false;
                                }

                                // It'll fit, but we need to make some adjustments.
                                match layout.fields {
                                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                        for (j, offset) in offsets.iter_mut().enumerate() {
                                            if !variants[i][j].is_zst() {
                                                *offset += this_offset;
                                            }
                                        }
                                    }
                                    _ => {
                                        panic!("Layout of fields should be Arbitrary for variants")
                                    }
                                }

                                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                                if !layout.abi.is_uninhabited() {
                                    layout.abi = Abi::Aggregate { sized: true };
                                }
                                layout.size += this_offset;

                                true
                            });

                        if !all_variants_fit {
                            return Ok(None);
                        }

                        let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

                        let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
                            i == largest_variant_index || layout.size == Size::ZERO
                        });
                        let same_size = size == variant_layouts[largest_variant_index].size;
                        let same_align = align == variant_layouts[largest_variant_index].align;

                        let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                            Abi::Uninhabited
                        } else if same_size && same_align && others_zst {
                            match variant_layouts[largest_variant_index].abi {
                                // When the total alignment and size match, we can use the
                                // same ABI as the scalar variant with the reserved niche.
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                Abi::ScalarPair(first, second) => {
                                    // Only the niche is guaranteed to be initialised,
                                    // so use union layouts for the other primitive.
                                    if niche_offset == Size::ZERO {
                                        Abi::ScalarPair(niche_scalar, second.to_union())
                                    } else {
                                        Abi::ScalarPair(first.to_union(), niche_scalar)
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            }
                        } else {
                            Abi::Aggregate { sized: true }
                        };

                        let layout = LayoutS {
                            variants: Variants::Multiple {
                                tag: niche_scalar,
                                tag_encoding: TagEncoding::Niche {
                                    untagged_variant: largest_variant_index,
                                    niche_variants,
                                    niche_start,
                                },
                                tag_field: 0,
                                variants: IndexVec::new(),
                            },
                            fields: FieldsShape::Arbitrary {
                                offsets: vec![niche_offset],
                                memory_index: vec![0],
                            },
                            abi,
                            largest_niche,
                            size,
                            align,
                        };

                        Ok(Some(TmpLayout { layout, variants: variant_layouts }))
                    };

                let niche_filling_layout = calculate_niche_filling_layout()?;

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
1255                     }
1256                     let mut x = discr.val as i128;
1257                     if discr_type.is_signed() {
1258                         // sign extend the raw representation to be an i128
1259                         x = (x << (128 - bits)) >> (128 - bits);
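                         // Worked example (illustrative): a raw i8 discriminant of 0xFF
                         // with bits = 8 becomes -1_i128 here, since the left shift by
                         // 120 moves the sign bit to bit 127 and the arithmetic right
                         // shift smears it back down.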
1260                     }
1261                     if x < min {
1262                         min = x;
1263                     }
1264                     if x > max {
1265                         max = x;
1266                     }
1267                 }
1268                 // We might have no inhabited variants, so pretend there's at least one.
1269                 if (min, max) == (i128::MAX, i128::MIN) {
1270                     min = 0;
1271                     max = 0;
1272                 }
1273                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1274                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1275
1276                 let mut align = dl.aggregate_align;
1277                 let mut size = Size::ZERO;
1278
1279                 // We're interested in the smallest alignment, so start large.
1280                 let mut start_align = Align::from_bytes(256).unwrap();
1281                 assert_eq!(Integer::for_align(dl, start_align), None);
1282
1283                 // repr(C) on an enum tells us to make a (tag, union) layout,
1284                 // so we need to grow the prefix alignment to be at least
1285                 // the alignment of the union. (This value is used both for
1286                 // determining the alignment of the overall enum, and for
1287                 // determining the alignment of the payload after the tag.)
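                     // Illustrative (not in the original source): for
                     // `#[repr(C)] enum E { A(u8), B(u64) }`, the layout is effectively
                     // `struct { tag, union { u8, u64 } }`, so the payload following the
                     // tag must be aligned for `u64` even in variant `A`.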
1288                 let mut prefix_align = min_ity.align(dl).abi;
1289                 if def.repr().c() {
1290                     for fields in &variants {
1291                         for field in fields {
1292                             prefix_align = prefix_align.max(field.align.abi);
1293                         }
1294                     }
1295                 }
1296
1297                 // Create the set of structs that represent each variant.
1298                 let mut layout_variants = variants
1299                     .iter_enumerated()
1300                     .map(|(i, field_layouts)| {
1301                         let mut st = self.univariant_uninterned(
1302                             ty,
1303                             &field_layouts,
1304                             &def.repr(),
1305                             StructKind::Prefixed(min_ity.size(), prefix_align),
1306                         )?;
1307                         st.variants = Variants::Single { index: i };
1308                         // Find the first field we can't move later
1309                         // to make room for a larger discriminant.
1310                         for field in
1311                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1312                         {
1313                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1314                                 start_align = start_align.min(field.align.abi);
1315                                 break;
1316                             }
1317                         }
1318                         size = cmp::max(size, st.size);
1319                         align = align.max(st.align);
1320                         Ok(st)
1321                     })
1322                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1323
1324                 // Align the maximum variant size to the largest alignment.
1325                 size = size.align_to(align.abi);
1326
1327                 if size.bytes() >= dl.obj_size_bound() {
1328                     return Err(LayoutError::SizeOverflow(ty));
1329                 }
1330
1331                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1332                 if typeck_ity < min_ity {
1333                     // It is a bug if layout decided on a larger discriminant size than
1334                     // typeck did (both are based on the values the discriminant can
1335                     // take on): this discriminant will be loaded and then stored into
1336                     // a variable of the type computed by typeck. Consider the buggy
1337                     // case where typeck decided on a byte-sized discriminant but
1338                     // layout thinks 16 bits are needed to store all the values: in
1339                     // codegen, storing that 16-bit discriminant into an 8-bit
1340                     // temporary would have to discard part of the value (or layout
1341                     // would be wrong in thinking it needs 16 bits in the first place).
1342                     bug!(
1343                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1344                         min_ity,
1345                         typeck_ity
1346                     );
1347                     // However, after this point it is fine to make the discriminant type as
1348                     // large as we like (as an optimisation) – we'll just truncate the value we load in codegen.
1349                 }
1350
1351                 // Check to see if we should use a different type for the
1352                 // discriminant. We can safely use a type with the same size
1353                 // as the alignment of the first field of each variant.
1354                 // We increase the size of the discriminant to avoid LLVM copying
1355                 // padding when it doesn't need to. This normally causes unaligned
1356                 // load/stores and excessive memcpy/memset operations. By using a
1357                 // bigger integer size, LLVM can be sure about its contents and
1358                 // won't be so conservative.
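                 // Illustrative (not in the original source): `enum E { A(u64), B }` only
                 // needs a one-byte tag, but `A`'s payload starts at offset 8, so the tag
                 // can be widened to 8 bytes without growing the enum.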
1359
1360                 // Start from the initial field alignment when picking the discriminant type.
1361                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1362                     min_ity
1363                 } else {
1364                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1365                 };
1366
1367                 // If the alignment-derived integer is no larger than the chosen minimum
1368                 // discriminant size, fall back to that minimum.
1369                 if ity <= min_ity {
1370                     ity = min_ity;
1371                 } else {
1372                     // Patch up the variants' first few fields.
1373                     let old_ity_size = min_ity.size();
1374                     let new_ity_size = ity.size();
1375                     for variant in &mut layout_variants {
1376                         match variant.fields {
1377                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1378                                 for i in offsets {
1379                                     if *i <= old_ity_size {
1380                                         assert_eq!(*i, old_ity_size);
1381                                         *i = new_ity_size;
1382                                     }
1383                                 }
1384                                 // We might be making the struct larger.
1385                                 if variant.size <= old_ity_size {
1386                                     variant.size = new_ity_size;
1387                                 }
1388                             }
1389                             _ => bug!(),
1390                         }
1391                     }
1392                 }
1393
1394                 let tag_mask = ity.size().unsigned_int_max();
1395                 let tag = Scalar::Initialized {
1396                     value: Int(ity, signed),
1397                     valid_range: WrappingRange {
1398                         start: (min as u128 & tag_mask),
1399                         end: (max as u128 & tag_mask),
1400                     },
1401                 };
1402                 let mut abi = Abi::Aggregate { sized: true };
1403
1404                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1405                     abi = Abi::Uninhabited;
1406                 } else if tag.size(dl) == size {
1407                     // Make sure we only use scalar layout when the enum is entirely its
1408                     // own tag (i.e. it has no padding nor any non-ZST variant fields).
1409                     abi = Abi::Scalar(tag);
1410                 } else {
1411                     // Try to use a ScalarPair for all tagged enums.
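                     // Illustrative (not in the original source): in
                     // `enum E { A(u32), B(u32) }`, every inhabited variant has a single
                     // non-ZST `u32` at the same offset, so the enum as a whole can be
                     // `ScalarPair(tag, u32)`.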
1412                     let mut common_prim = None;
1413                     let mut common_prim_initialized_in_all_variants = true;
1414                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1415                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1416                             bug!();
1417                         };
1418                         let mut fields =
1419                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1420                         let (field, offset) = match (fields.next(), fields.next()) {
1421                             (None, None) => {
1422                                 common_prim_initialized_in_all_variants = false;
1423                                 continue;
1424                             }
1425                             (Some(pair), None) => pair,
1426                             _ => {
1427                                 common_prim = None;
1428                                 break;
1429                             }
1430                         };
1431                         let prim = match field.abi {
1432                             Abi::Scalar(scalar) => {
1433                                 common_prim_initialized_in_all_variants &=
1434                                     matches!(scalar, Scalar::Initialized { .. });
1435                                 scalar.primitive()
1436                             }
1437                             _ => {
1438                                 common_prim = None;
1439                                 break;
1440                             }
1441                         };
1442                         if let Some(pair) = common_prim {
1443                             // This is pretty conservative. We could go fancier
1444                             // by conflating things like i32 and u32, or even
1445                             // realising that (u8, u8) could just cohabit with
1446                             // u16 or even u32.
1447                             if pair != (prim, offset) {
1448                                 common_prim = None;
1449                                 break;
1450                             }
1451                         } else {
1452                             common_prim = Some((prim, offset));
1453                         }
1454                     }
1455                     if let Some((prim, offset)) = common_prim {
1456                         let prim_scalar = if common_prim_initialized_in_all_variants {
1457                             scalar_unit(prim)
1458                         } else {
1459                             // Common prim might be uninit.
1460                             Scalar::Union { value: prim }
1461                         };
1462                         let pair = self.scalar_pair(tag, prim_scalar);
1463                         let pair_offsets = match pair.fields {
1464                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1465                                 assert_eq!(memory_index, &[0, 1]);
1466                                 offsets
1467                             }
1468                             _ => bug!(),
1469                         };
1470                         if pair_offsets[0] == Size::ZERO
1471                             && pair_offsets[1] == *offset
1472                             && align == pair.align
1473                             && size == pair.size
1474                         {
1475                             // We can use `ScalarPair` only when it matches our
1476                             // already computed layout (including `#[repr(C)]`).
1477                             abi = pair.abi;
1478                         }
1479                     }
1480                 }
1481
1482                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1483                 // variants to ensure they are consistent. This is because a downcast is
1484                 // semantically a NOP, and thus should not affect layout.
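                 // Illustrative (not in the original source): if the enum as a whole is
                 // `ScalarPair(tag, u32)`, each inhabited aggregate variant below is
                 // promoted to that same ABI, so a downcast changes nothing.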
1485                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1486                     for variant in &mut layout_variants {
1487                         // We only do this for variants with fields; the others are not accessed anyway.
1488                         // Also do not overwrite any already existing "clever" ABIs.
1489                         if variant.fields.count() > 0
1490                             && matches!(variant.abi, Abi::Aggregate { .. })
1491                         {
1492                             variant.abi = abi;
1493                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1494                             variant.size = cmp::max(variant.size, size);
1495                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1496                         }
1497                     }
1498                 }
1499
1500                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1501
1502                 let tagged_layout = LayoutS {
1503                     variants: Variants::Multiple {
1504                         tag,
1505                         tag_encoding: TagEncoding::Direct,
1506                         tag_field: 0,
1507                         variants: IndexVec::new(),
1508                     },
1509                     fields: FieldsShape::Arbitrary {
1510                         offsets: vec![Size::ZERO],
1511                         memory_index: vec![0],
1512                     },
1513                     largest_niche,
1514                     abi,
1515                     align,
1516                     size,
1517                 };
1518
1519                 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1520
1521                 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1522                     (tl, Some(nl)) => {
1523                         // Pick the smaller layout; otherwise,
1524                         // pick the layout with the larger niche; otherwise,
1525                         // pick tagged as it has simpler codegen.
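                             // Illustrative (not in the original source): for
                             // `Option<NonZeroU8>` the niche-filling layout is 1 byte
                             // while the tagged layout is 2 bytes, so the niche layout
                             // wins on size alone.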
1526                         use Ordering::*;
1527                         let niche_size = |tmp_l: &TmpLayout<'_>| {
1528                             tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1529                         };
1530                         match (
1531                             tl.layout.size.cmp(&nl.layout.size),
1532                             niche_size(&tl).cmp(&niche_size(&nl)),
1533                         ) {
1534                             (Greater, _) => nl,
1535                             (Equal, Less) => nl,
1536                             _ => tl,
1537                         }
1538                     }
1539                     (tl, None) => tl,
1540                 };
1541
1542                 // Now we can intern the variant layouts and store them in the enum layout.
1543                 best_layout.layout.variants = match best_layout.layout.variants {
1544                     Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1545                         tag,
1546                         tag_encoding,
1547                         tag_field,
1548                         variants: best_layout
1549                             .variants
1550                             .into_iter()
1551                             .map(|layout| tcx.intern_layout(layout))
1552                             .collect(),
1553                     },
1554                     _ => bug!(),
1555                 };
1556
1557                 tcx.intern_layout(best_layout.layout)
1558             }
1559
1560             // Types with no meaningful known layout.
1561             ty::Projection(_) | ty::Opaque(..) => {
1562                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1563                 // if that was possible, so there's no reason to try again here.
1564                 return Err(LayoutError::Unknown(ty));
1565             }
1566
1567             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1568                 bug!("Layout::compute: unexpected type `{}`", ty)
1569             }
1570
1571             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1572                 return Err(LayoutError::Unknown(ty));
1573             }
1574         })
1575     }
1576 }
1577
1578 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1579 #[derive(Clone, Debug, PartialEq)]
1580 enum SavedLocalEligibility {
1581     Unassigned,
1582     Assigned(VariantIdx),
1583     // FIXME: Use newtype_index so we aren't wasting bytes
1584     Ineligible(Option<u32>),
1585 }
1586
1587 // When laying out generators, we divide our saved local fields into two
1588 // categories: overlap-eligible and overlap-ineligible.
1589 //
1590 // Those fields which are ineligible for overlap go in a "prefix" at the
1591 // beginning of the layout, and always have space reserved for them.
1592 //
1593 // Overlap-eligible fields are only assigned to one variant, so we lay
1594 // those fields out for each variant and put them right after the
1595 // prefix.
1596 //
1597 // Finally, in the layout details, we point to the fields from the
1598 // variants they are assigned to. It is possible for some fields to be
1599 // included in multiple variants. No field ever "moves around" in the
1600 // layout; its offset is always the same.
1601 //
1602 // Also included in the layout are the upvars and the discriminant.
1603 // These are included as fields on the "outer" layout; they are not part
1604 // of any variant.
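     // Illustrative example (not in the original source): a local live across two
     // different `yield` points is saved in two variants, so it becomes
     // overlap-ineligible and gets one fixed slot in the prefix; a local live
     // across a single `yield` stays in that variant's suffix and may share its
     // offset with locals assigned to other variants.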
1605 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1606     /// Compute the eligibility and assignment of each local.
1607     fn generator_saved_local_eligibility(
1608         &self,
1609         info: &GeneratorLayout<'tcx>,
1610     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1611         use SavedLocalEligibility::*;
1612
1613         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1614             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1615
1616         // The saved locals not eligible for overlap. These will get
1617         // "promoted" to the prefix of our generator.
1618         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1619
1620         // Figure out which of our saved locals are fields in only
1621         // one variant. The rest are deemed ineligible for overlap.
1622         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1623             for local in fields {
1624                 match assignments[*local] {
1625                     Unassigned => {
1626                         assignments[*local] = Assigned(variant_index);
1627                     }
1628                     Assigned(idx) => {
1629                         // We've already seen this local at another suspension
1630                         // point, so it is no longer a candidate.
1631                         trace!(
1632                             "removing local {:?} in >1 variant ({:?}, {:?})",
1633                             local,
1634                             variant_index,
1635                             idx
1636                         );
1637                         ineligible_locals.insert(*local);
1638                         assignments[*local] = Ineligible(None);
1639                     }
1640                     Ineligible(_) => {}
1641                 }
1642             }
1643         }
1644
1645         // Next, check every pair of eligible locals to see if they
1646         // conflict.
1647         for local_a in info.storage_conflicts.rows() {
1648             let conflicts_a = info.storage_conflicts.count(local_a);
1649             if ineligible_locals.contains(local_a) {
1650                 continue;
1651             }
1652
1653             for local_b in info.storage_conflicts.iter(local_a) {
1654                 // local_a and local_b are storage live at the same time, therefore they
1655                 // cannot overlap in the generator layout. The only way to guarantee
1656                 // this is if they are in the same variant, or one is ineligible
1657                 // (which means it is stored in every variant).
1658                 if ineligible_locals.contains(local_b)
1659                     || assignments[local_a] == assignments[local_b]
1660                 {
1661                     continue;
1662                 }
1663
1664                 // If they conflict, we will choose one to make ineligible.
1665                 // This is not always optimal; it's just a greedy heuristic that
1666                 // seems to produce good results most of the time.
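                 // Illustrative (not in the original source): if local A conflicts with
                 // five other locals and local B with just one, A is the one made
                 // ineligible, leaving more locals free to overlap.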
1667                 let conflicts_b = info.storage_conflicts.count(local_b);
1668                 let (remove, other) =
1669                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1670                 ineligible_locals.insert(remove);
1671                 assignments[remove] = Ineligible(None);
1672                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1673             }
1674         }
1675
1676                 // Count the number of variants in use. If only one is used, then it is
1677                 // impossible to overlap any locals in our layout. In this case it's
1678         // always better to make the remaining locals ineligible, so we can
1679         // lay them out with the other locals in the prefix and eliminate
1680         // unnecessary padding bytes.
1681         {
1682             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1683             for assignment in &assignments {
1684                 if let Assigned(idx) = assignment {
1685                     used_variants.insert(*idx);
1686                 }
1687             }
1688             if used_variants.count() < 2 {
1689                 for assignment in assignments.iter_mut() {
1690                     *assignment = Ineligible(None);
1691                 }
1692                 ineligible_locals.insert_all();
1693             }
1694         }
1695
1696         // Write down the order of our locals that will be promoted to the prefix.
1697         {
1698             for (idx, local) in ineligible_locals.iter().enumerate() {
1699                 assignments[local] = Ineligible(Some(idx as u32));
1700             }
1701         }
1702         debug!("generator saved local assignments: {:?}", assignments);
1703
1704         (ineligible_locals, assignments)
1705     }
1706
1707     /// Compute the full generator layout.
1708     fn generator_layout(
1709         &self,
1710         ty: Ty<'tcx>,
1711         def_id: hir::def_id::DefId,
1712         substs: SubstsRef<'tcx>,
1713     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1714         use SavedLocalEligibility::*;
1715         let tcx = self.tcx;
1716         let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1717
1718         let Some(info) = tcx.generator_layout(def_id) else {
1719             return Err(LayoutError::Unknown(ty));
1720         };
1721         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1722
1723         // Build a prefix layout, including "promoting" all ineligible
1724         // locals as part of the prefix. We compute the layout of all of
1725         // these fields at once to get optimal packing.
1726         let tag_index = substs.as_generator().prefix_tys().count();
1727
1728         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1729         let max_discr = (info.variant_fields.len() - 1) as u128;
1730         let discr_int = Integer::fit_unsigned(max_discr);
1731         let discr_int_ty = discr_int.to_ty(tcx, false);
1732         let tag = Scalar::Initialized {
1733             value: Primitive::Int(discr_int, false),
1734             valid_range: WrappingRange { start: 0, end: max_discr },
1735         };
1736         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1737         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1738
1739         let promoted_layouts = ineligible_locals
1740             .iter()
1741             .map(|local| subst_field(info.field_tys[local]))
1742             .map(|ty| tcx.mk_maybe_uninit(ty))
1743             .map(|ty| self.layout_of(ty));
1744         let prefix_layouts = substs
1745             .as_generator()
1746             .prefix_tys()
1747             .map(|ty| self.layout_of(ty))
1748             .chain(iter::once(Ok(tag_layout)))
1749             .chain(promoted_layouts)
1750             .collect::<Result<Vec<_>, _>>()?;
1751         let prefix = self.univariant_uninterned(
1752             ty,
1753             &prefix_layouts,
1754             &ReprOptions::default(),
1755             StructKind::AlwaysSized,
1756         )?;
1757
1758         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1759
1760         // Split the prefix layout into the "outer" fields (upvars and
1761         // discriminant) and the "promoted" fields. Promoted fields will
1762         // get included in each variant that requested them in
1763         // GeneratorLayout.
1764         debug!("prefix = {:#?}", prefix);
1765         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1766             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1767                 let mut inverse_memory_index = invert_mapping(&memory_index);
1768
1769                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1770                 // "outer" and "promoted" fields respectively.
1771                 let b_start = (tag_index + 1) as u32;
1772                 let offsets_b = offsets.split_off(b_start as usize);
1773                 let offsets_a = offsets;
1774
1775                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1776                 // by preserving the order but keeping only one disjoint "half" each.
1777                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1778                 let inverse_memory_index_b: Vec<_> =
1779                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1780                 inverse_memory_index.retain(|&i| i < b_start);
1781                 let inverse_memory_index_a = inverse_memory_index;
1782
1783                 // Since `inverse_memory_index_{a,b}` each only refer to their
1784                 // respective fields, they can be safely inverted
1785                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1786                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1787
1788                 let outer_fields =
1789                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1790                 (outer_fields, offsets_b, memory_index_b)
1791             }
1792             _ => bug!(),
1793         };
1794
1795         let mut size = prefix.size;
1796         let mut align = prefix.align;
1797         let variants = info
1798             .variant_fields
1799             .iter_enumerated()
1800             .map(|(index, variant_fields)| {
1801                 // Only include overlap-eligible fields when we compute our variant layout.
1802                 let variant_only_tys = variant_fields
1803                     .iter()
1804                     .filter(|local| match assignments[**local] {
1805                         Unassigned => bug!(),
1806                         Assigned(v) if v == index => true,
1807                         Assigned(_) => bug!("assignment does not match variant"),
1808                         Ineligible(_) => false,
1809                     })
1810                     .map(|local| subst_field(info.field_tys[*local]));
1811
1812                 let mut variant = self.univariant_uninterned(
1813                     ty,
1814                     &variant_only_tys
1815                         .map(|ty| self.layout_of(ty))
1816                         .collect::<Result<Vec<_>, _>>()?,
1817                     &ReprOptions::default(),
1818                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1819                 )?;
1820                 variant.variants = Variants::Single { index };
1821
1822                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1823                     bug!();
1824                 };
1825
1826                 // Now, stitch the promoted and variant-only fields back together in
1827                 // the order they are mentioned by our GeneratorLayout.
1828                 // Because we only use some subset (that can differ between variants)
1829                 // of the promoted fields, we can't just pick those elements of the
1830                 // `promoted_memory_index` (as we'd end up with gaps).
1831                 // So instead, we build an "inverse memory_index", as if all of the
1832                 // promoted fields were being used, but leave the elements not in the
1833                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1834                 // obtain a valid (bijective) mapping.
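                 // Illustrative (not in the original source): with two promoted fields
                 // and a variant using only promoted field #1 plus one variant-only
                 // field, the combined inverse index starts as [!0, 0, 1]; retaining
                 // the valid entries gives [0, 1], which is then inverted back into a
                 // bijective `memory_index`.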
1835                 const INVALID_FIELD_IDX: u32 = !0;
1836                 let mut combined_inverse_memory_index =
1837                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1838                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1839                 let combined_offsets = variant_fields
1840                     .iter()
1841                     .enumerate()
1842                     .map(|(i, local)| {
1843                         let (offset, memory_index) = match assignments[*local] {
1844                             Unassigned => bug!(),
1845                             Assigned(_) => {
1846                                 let (offset, memory_index) =
1847                                     offsets_and_memory_index.next().unwrap();
1848                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1849                             }
1850                             Ineligible(field_idx) => {
1851                                 let field_idx = field_idx.unwrap() as usize;
1852                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1853                             }
1854                         };
1855                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1856                         offset
1857                     })
1858                     .collect();
1859
1860                 // Remove the unused slots and invert the mapping to obtain the
1861                 // combined `memory_index` (also see previous comment).
1862                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1863                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1864
1865                 variant.fields = FieldsShape::Arbitrary {
1866                     offsets: combined_offsets,
1867                     memory_index: combined_memory_index,
1868                 };
1869
1870                 size = size.max(variant.size);
1871                 align = align.max(variant.align);
1872                 Ok(tcx.intern_layout(variant))
1873             })
1874             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1875
1876         size = size.align_to(align.abi);
1877
1878         let abi =
1879             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1880                 Abi::Uninhabited
1881             } else {
1882                 Abi::Aggregate { sized: true }
1883             };
1884
1885         let layout = tcx.intern_layout(LayoutS {
1886             variants: Variants::Multiple {
1887                 tag,
1888                 tag_encoding: TagEncoding::Direct,
1889                 tag_field: tag_index,
1890                 variants,
1891             },
1892             fields: outer_fields,
1893             abi,
1894             largest_niche: prefix.largest_niche,
1895             size,
1896             align,
1897         });
1898         debug!("generator layout ({:?}): {:#?}", ty, layout);
1899         Ok(layout)
1900     }
1901
1902     /// This is invoked by the `layout_of` query to record the final
1903     /// layout of each type.
1904     #[inline(always)]
1905     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1906         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1907         // for dumping later.
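         // Illustrative (an assumption about the exact format, not from this file):
         // the recorded entries are later emitted as lines like
         // `print-type-size type: `Option<u32>`: 8 bytes, alignment: 4 bytes`.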
1908         if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1909             self.record_layout_for_printing_outlined(layout)
1910         }
1911     }
1912
1913     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1914         // Ignore layouts computed in non-empty parameter environments, as well as
1915         // non-monomorphic layouts, as the user only wants to see what results
1916         // from the final codegen session.
1917         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1918             return;
1919         }
1920
1921         // (delay format until we actually need it)
1922         let record = |kind, packed, opt_discr_size, variants| {
1923             let type_desc = format!("{:?}", layout.ty);
1924             self.tcx.sess.code_stats.record_type_size(
1925                 kind,
1926                 type_desc,
1927                 layout.align.abi,
1928                 layout.size,
1929                 packed,
1930                 opt_discr_size,
1931                 variants,
1932             );
1933         };
1934
1935         let adt_def = match *layout.ty.kind() {
1936             ty::Adt(ref adt_def, _) => {
1937                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1938                 adt_def
1939             }
1940
1941             ty::Closure(..) => {
1942                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1943                 record(DataTypeKind::Closure, false, None, vec![]);
1944                 return;
1945             }
1946
1947             _ => {
1948                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1949                 return;
1950             }
1951         };
1952
1953         let adt_kind = adt_def.adt_kind();
1954         let adt_packed = adt_def.repr().pack.is_some();
1955
1956         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1957             let mut min_size = Size::ZERO;
1958             let field_info: Vec<_> = flds
1959                 .iter()
1960                 .enumerate()
1961                 .map(|(i, &name)| {
1962                     let field_layout = layout.field(self, i);
1963                     let offset = layout.fields.offset(i);
1964                     let field_end = offset + field_layout.size;
1965                     if min_size < field_end {
1966                         min_size = field_end;
1967                     }
1968                     FieldInfo {
1969                         name,
1970                         offset: offset.bytes(),
1971                         size: field_layout.size.bytes(),
1972                         align: field_layout.align.abi.bytes(),
1973                     }
1974                 })
1975                 .collect();
1976
1977             VariantInfo {
1978                 name: n,
1979                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1980                 align: layout.align.abi.bytes(),
1981                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1982                 fields: field_info,
1983             }
1984         };
1985
1986         match layout.variants {
1987             Variants::Single { index } => {
1988                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1989                     debug!(
1990                         "print-type-size `{:#?}` variant {}",
1991                         layout,
1992                         adt_def.variant(index).name
1993                     );
1994                     let variant_def = &adt_def.variant(index);
1995                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1996                     record(
1997                         adt_kind.into(),
1998                         adt_packed,
1999                         None,
2000                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
2001                     );
2002                 } else {
2003                     // (This case arises for *empty* enums, so give it
2004                     // zero variants.)
2005                     record(adt_kind.into(), adt_packed, None, vec![]);
2006                 }
2007             }
2008
2009             Variants::Multiple { tag, ref tag_encoding, .. } => {
2010                 debug!(
2011                     "print-type-size `{:#?}` adt general variants def {}",
2012                     layout.ty,
2013                     adt_def.variants().len()
2014                 );
2015                 let variant_infos: Vec<_> = adt_def
2016                     .variants()
2017                     .iter_enumerated()
2018                     .map(|(i, variant_def)| {
2019                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2020                         build_variant_info(
2021                             Some(variant_def.name),
2022                             &fields,
2023                             layout.for_variant(self, i),
2024                         )
2025                     })
2026                     .collect();
2027                 record(
2028                     adt_kind.into(),
2029                     adt_packed,
2030                     match tag_encoding {
2031                         TagEncoding::Direct => Some(tag.size(self)),
2032                         _ => None,
2033                     },
2034                     variant_infos,
2035                 );
2036             }
2037         }
2038     }
2039 }
2040
2041 /// Type size "skeleton", i.e., the only information determining a type's size.
2042 /// While this is conservative (aside from constant sizes, only pointers,
2043 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
2044 /// enough to statically check common use cases of transmute.
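     /// For example (illustrative, not in the original source), a
     /// `transmute::<&T, Option<&T>>` where `T` is a type parameter can still be
     /// checked: both sides reduce to `SizeSkeleton::Pointer` with the same
     /// `tail`, even though neither concrete layout is known.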
2045 #[derive(Copy, Clone, Debug)]
2046 pub enum SizeSkeleton<'tcx> {
2047     /// Any statically computable Layout.
2048     Known(Size),
2049
2050     /// A potentially-fat pointer.
2051     Pointer {
2052         /// If true, this pointer is never null.
2053         non_zero: bool,
2054         /// The type which determines the unsized metadata, if any,
2055         /// of this pointer. Either a type parameter or a projection
2056         /// depending on one, with regions erased.
2057         tail: Ty<'tcx>,
2058     },
2059 }
2060
2061 impl<'tcx> SizeSkeleton<'tcx> {
2062     pub fn compute(
2063         ty: Ty<'tcx>,
2064         tcx: TyCtxt<'tcx>,
2065         param_env: ty::ParamEnv<'tcx>,
2066     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2067         debug_assert!(!ty.has_infer_types_or_consts());
2068
2069         // First try computing a static layout.
2070         let err = match tcx.layout_of(param_env.and(ty)) {
2071             Ok(layout) => {
2072                 return Ok(SizeSkeleton::Known(layout.size));
2073             }
2074             Err(err) => err,
2075         };
2076
2077         match *ty.kind() {
2078             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2079                 let non_zero = !ty.is_unsafe_ptr();
2080                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2081                 match tail.kind() {
2082                     ty::Param(_) | ty::Projection(_) => {
2083                         debug_assert!(tail.has_param_types_or_consts());
2084                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2085                     }
2086                     _ => bug!(
2087                         "SizeSkeleton::compute({}): layout errored ({}), yet \
2088                               tail `{}` is not a type parameter or a projection",
2089                         ty,
2090                         err,
2091                         tail
2092                     ),
2093                 }
2094             }
2095
2096             ty::Adt(def, substs) => {
2097                 // Only newtypes and enums with the nullable-pointer optimization.
2098                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2099                     return Err(err);
2100                 }
2101
2102                 // Get a zero-sized variant or a pointer newtype.
2103                 let zero_or_ptr_variant = |i| {
2104                     let i = VariantIdx::new(i);
2105                     let fields =
2106                         def.variant(i).fields.iter().map(|field| {
2107                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2108                         });
2109                     let mut ptr = None;
2110                     for field in fields {
2111                         let field = field?;
2112                         match field {
2113                             SizeSkeleton::Known(size) => {
2114                                 if size.bytes() > 0 {
2115                                     return Err(err);
2116                                 }
2117                             }
2118                             SizeSkeleton::Pointer { .. } => {
2119                                 if ptr.is_some() {
2120                                     return Err(err);
2121                                 }
2122                                 ptr = Some(field);
2123                             }
2124                         }
2125                     }
2126                     Ok(ptr)
2127                 };
2128
2129                 let v0 = zero_or_ptr_variant(0)?;
2130                 // Newtype.
2131                 if def.variants().len() == 1 {
2132                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2133                         return Ok(SizeSkeleton::Pointer {
2134                             non_zero: non_zero
2135                                 || match tcx.layout_scalar_valid_range(def.did()) {
2136                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2137                                     (Bound::Included(start), Bound::Included(end)) => {
2138                                         0 < start && start < end
2139                                     }
2140                                     _ => false,
2141                                 },
2142                             tail,
2143                         });
2144                     } else {
2145                         return Err(err);
2146                     }
2147                 }
2148
2149                 let v1 = zero_or_ptr_variant(1)?;
2150                 // Nullable pointer enum optimization.
2151                 match (v0, v1) {
2152                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2153                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2154                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2155                     }
2156                     _ => Err(err),
2157                 }
2158             }
2159
2160             ty::Projection(_) | ty::Opaque(..) => {
2161                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2162                 if ty == normalized {
2163                     Err(err)
2164                 } else {
2165                     SizeSkeleton::compute(normalized, tcx, param_env)
2166                 }
2167             }
2168
2169             _ => Err(err),
2170         }
2171     }
2172
2173     pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2174         match (self, other) {
2175             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2176             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2177                 a == b
2178             }
2179             _ => false,
2180         }
2181     }
2182 }
2183
2184 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2185     fn tcx(&self) -> TyCtxt<'tcx>;
2186 }
2187
2188 pub trait HasParamEnv<'tcx> {
2189     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2190 }
2191
2192 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2193     #[inline]
2194     fn data_layout(&self) -> &TargetDataLayout {
2195         &self.data_layout
2196     }
2197 }
2198
2199 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2200     fn target_spec(&self) -> &Target {
2201         &self.sess.target
2202     }
2203 }
2204
2205 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2206     #[inline]
2207     fn tcx(&self) -> TyCtxt<'tcx> {
2208         *self
2209     }
2210 }
2211
2212 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2213     #[inline]
2214     fn data_layout(&self) -> &TargetDataLayout {
2215         &self.data_layout
2216     }
2217 }
2218
2219 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2220     fn target_spec(&self) -> &Target {
2221         &self.sess.target
2222     }
2223 }
2224
2225 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2226     #[inline]
2227     fn tcx(&self) -> TyCtxt<'tcx> {
2228         **self
2229     }
2230 }
2231
2232 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2233     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2234         self.param_env
2235     }
2236 }
2237
2238 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2239     fn data_layout(&self) -> &TargetDataLayout {
2240         self.tcx.data_layout()
2241     }
2242 }
2243
2244 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2245     fn target_spec(&self) -> &Target {
2246         self.tcx.target_spec()
2247     }
2248 }
2249
2250 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2251     fn tcx(&self) -> TyCtxt<'tcx> {
2252         self.tcx.tcx()
2253     }
2254 }
2255
2256 pub trait MaybeResult<T> {
2257     type Error;
2258
2259     fn from(x: Result<T, Self::Error>) -> Self;
2260     fn to_result(self) -> Result<T, Self::Error>;
2261 }
2262
2263 impl<T> MaybeResult<T> for T {
2264     type Error = !;
2265
2266     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2267         x
2268     }
2269     fn to_result(self) -> Result<T, Self::Error> {
2270         Ok(self)
2271     }
2272 }
2273
2274 impl<T, E> MaybeResult<T> for Result<T, E> {
2275     type Error = E;
2276
2277     fn from(x: Result<T, Self::Error>) -> Self {
2278         x
2279     }
2280     fn to_result(self) -> Result<T, Self::Error> {
2281         self
2282     }
2283 }
2284
2285 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2286
2287 /// Trait for contexts that want to be able to compute layouts of types.
2288 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2289 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2290     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2291     /// returned from `layout_of` (see also `handle_layout_err`).
2292     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2293
2294     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2295     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2296     #[inline]
2297     fn layout_tcx_at_span(&self) -> Span {
2298         DUMMY_SP
2299     }
2300
2301     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2302     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2303     ///
2304     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2305     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2306     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2307     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
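         /// For example (illustrative, not in the original source), a codegen
         /// context can set `type LayoutOfResult = TyAndLayout<'tcx>` and turn any
         /// `LayoutError` into a fatal error here, while `LayoutCx` (see below)
         /// keeps the `Result` and simply returns the error unchanged.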
2308     fn handle_layout_err(
2309         &self,
2310         err: LayoutError<'tcx>,
2311         span: Span,
2312         ty: Ty<'tcx>,
2313     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2314 }
2315
2316 /// Blanket extension trait for contexts that can compute layouts of types.
2317 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2318     /// Computes the layout of a type. Note that this implicitly
2319     /// executes in "reveal all" mode, and will normalize the input type.
2320     #[inline]
2321     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2322         self.spanned_layout_of(ty, DUMMY_SP)
2323     }
2324
2325     /// Computes the layout of a type, at `span`. Note that this implicitly
2326     /// executes in "reveal all" mode, and will normalize the input type.
2327     // FIXME(eddyb) avoid passing information like this, and instead add more
2328     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2329     #[inline]
2330     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2331         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2332         let tcx = self.tcx().at(span);
2333
2334         MaybeResult::from(
2335             tcx.layout_of(self.param_env().and(ty))
2336                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2337         )
2338     }
2339 }
2340
2341 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}

impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        self.tcx.span
    }

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}

impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
where
    C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
{
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                let tcx = cx.tcx();
                let param_env = cx.param_env();

                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants().is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variant(variant_index).fields.len(),
                    _ => bug!(),
                };
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => variants[variant_index],
        };

        assert_eq!(*layout.variants(), Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }

    fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
        enum TyMaybeWithLayout<'tcx> {
            Ty(Ty<'tcx>),
            TyAndLayout(TyAndLayout<'tcx>),
        }

        fn field_ty_or_layout<'tcx>(
            this: TyAndLayout<'tcx>,
            cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
            i: usize,
        ) -> TyMaybeWithLayout<'tcx> {
            let tcx = cx.tcx();
            let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
                TyAndLayout {
                    layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
                    ty: tag.primitive().to_ty(tcx),
                }
            };

            match *this.ty.kind() {
                ty::Bool
                | ty::Char
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_)
                | ty::FnPtr(_)
                | ty::Never
                | ty::FnDef(..)
                | ty::GeneratorWitness(..)
                | ty::Foreign(..)
                | ty::Dynamic(_, _, ty::Dyn) => {
                    bug!("TyAndLayout::field({:?}): not applicable", this)
                }

                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                    if i == 0 {
                        let nil = tcx.mk_unit();
                        let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
                            tcx.mk_mut_ptr(nil)
                        } else {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                        };

                        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
                        // the `Result` should always work because the type is
                        // always either `*mut ()` or `&'static mut ()`.
                        return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
                            ty: this.ty,
                            ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
                        });
                    }

                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _, ty::Dyn) => {
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ))
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_tup(&[
                                tcx.mk_array(tcx.types.usize, 3),
                                tcx.mk_array(Option<fn()>),
                            ])
                            */
                        }
                        _ => bug!("TyAndLayout::field({:?}): not applicable", this),
                    }
                }

                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),

                // Tuples, generators and closures.
                ty::Closure(_, ref substs) => field_ty_or_layout(
                    TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
                    cx,
                    i,
                ),

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                        substs
                            .as_generator()
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())
                            .unwrap()
                            .nth(i)
                            .unwrap(),
                    ),
                    Variants::Multiple { tag, tag_field, .. } => {
                        if i == tag_field {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
                    }
                },

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),

                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
                        }

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { tag, .. } => {
                            assert_eq!(i, 0);
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                    }
                }

                ty::Dynamic(_, _, ty::DynStar) => {
                    if i == 0 {
                        TyMaybeWithLayout::Ty(tcx.types.usize)
                    } else if i == 1 {
                        // FIXME(dyn-star) same FIXME as above applies here too
                        TyMaybeWithLayout::Ty(
                            tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ),
                        )
                    } else {
                        bug!("no field {i} on dyn*")
                    }
                }

                ty::Projection(_)
                | ty::Bound(..)
                | ty::Placeholder(..)
                | ty::Opaque(..)
                | ty::Param(_)
                | ty::Infer(_)
                | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
            }
        }

        match field_ty_or_layout(this, cx, i) {
            TyMaybeWithLayout::Ty(field_ty) => {
                cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
                    bug!(
                        "failed to get layout for `{}`: {},\n\
                         despite it being a field (#{}) of an existing layout: {:#?}",
                        field_ty,
                        e,
                        i,
                        this
                    )
                })
            }
            TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
        }
    }

    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        let tcx = cx.tcx();
        let param_env = cx.param_env();

        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: cx.data_layout().instruction_address_space,
                })
            }
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let kind = if tcx.sess.opts.optimize == OptLevel::No {
                    // Use conservative pointer kind if not optimizing. This saves us the
                    // Freeze/Unpin queries, and can save time in the codegen backend (noalias
                    // attributes in LLVM have compile-time cost even in unoptimized builds).
                    PointerKind::SharedMutable
                } else {
                    match mt {
                        hir::Mutability::Not => {
                            if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::Frozen
                            } else {
                                PointerKind::SharedMutable
                            }
                        }
                        hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained, that
                            // is not based-on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
                            if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::UniqueBorrowed
                            } else {
                                PointerKind::UniqueBorrowedPinned
                            }
                        }
                    }
                };

                tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { untagged_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, untagged_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }

    fn is_adt(this: TyAndLayout<'tcx>) -> bool {
        matches!(this.ty.kind(), ty::Adt(..))
    }

    fn is_never(this: TyAndLayout<'tcx>) -> bool {
        this.ty.kind() == &ty::Never
    }

    fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
        matches!(this.ty.kind(), ty::Tuple(..))
    }

    fn is_unit(this: TyAndLayout<'tcx>) -> bool {
        matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
    }
}
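
// Illustrative sketch (hypothetical `cx`/`tcx` values): the accessors above
// back `TyAndLayout::field` and `TyAndLayout::for_variant`. For a fat
// pointer such as `&[u8]`, field 0 is the data pointer (given a thin
// `&'static mut ()` layout by `field_ty_or_layout`, as noted above) and
// field 1 is the `usize` length:
//
//     let slice_ref = tcx.mk_imm_ref(tcx.lifetimes.re_erased, tcx.mk_slice(tcx.types.u8));
//     let layout = cx.layout_of(slice_ref)?;
//     assert_eq!(layout.fields.count(), 2);
//     assert_eq!(layout.field(&cx, 1).ty, tcx.types.usize);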

impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `fn_abi_of_instance` might be performing.
    #[tracing::instrument(level = "debug", skip(tcx, param_env))]
    fn fn_sig_for_fn_abi(
        &self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> ty::PolyFnSig<'tcx> {
        let ty = self.ty(tcx, param_env);
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                //
                // We normalize the `fn_sig` again after substituting at a later point.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .bound_fn_sig(def_id)
                        .map_bound(|fn_sig| {
                            tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
                        })
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VTableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();

                let sig = sig.skip_binder();
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    ),
                    bound_vars,
                )
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                let sig = sig.skip_binder();
                let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    ),
                    bound_vars,
                )
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
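
// Hedged summary of `fn_sig_for_fn_abi` above (the match arms are
// authoritative): closures get an extra leading environment parameter under
// a fresh `BrEnv` late-bound region, i.e. conceptually
// `fn(env, args...) -> output`, and generators are exposed as
// `fn(Pin<&mut G>, resume_ty) -> GeneratorState<yield_ty, return_ty>`.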

/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
///
/// * `fn_def_id` - the `DefId` of the function, if one is available. For
///   Rust-defined functions this is used to consult the function's codegen
///   attr flags (function pointers have no such flags). This is generally only
///   needed for small optimizations where we try to say a function which
///   otherwise might look like it could unwind doesn't actually unwind
///   (such as for intrinsics and such).
///
/// * `abi` - this is the ABI that the function is defined with. This is the
///   primary factor for determining whether a function can unwind or not.
///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). Unwinding is defined for each
/// ABI individually, but it always corresponds to some form of stack-based
/// unwinding (the exact mechanism of which varies platform-by-platform).
///
/// Rust functions are classified as unwinding or not based on the active
/// "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
/// only if the final panic mode is panic=abort. In this scenario any code
/// previously compiled assuming that a function can unwind is still correct, it
/// just never happens to actually unwind at runtime.
///
/// This function's answer to whether or not a function can unwind is quite
/// impactful throughout the compiler. This affects things like:
///
/// * Calling a function which can't unwind means codegen simply ignores any
///   associated unwinding cleanup.
/// * Calling a function which can unwind from a function which can't unwind
///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
///   aborts the process.
/// * This affects whether functions have the LLVM `nounwind` attribute, which
///   affects various optimizations and codegen.
///
/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
#[inline]
#[tracing::instrument(level = "debug", skip(tcx))]
pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
    if let Some(did) = fn_def_id {
        // Special attribute for functions which can't unwind.
        if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
            return false;
        }

        // With `-C panic=abort`, all non-FFI functions are required to not unwind.
        //
        // Note that this is true regardless of the ABI specified on the function -- an
        // `extern "C-unwind"` function defined in Rust is also required to abort.
        if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
            return false;
        }

        // With -Z panic-in-drop=abort, drop_in_place never unwinds.
        //
        // This is not part of `codegen_fn_attrs` as it can differ between crates
        // and therefore cannot be computed in core.
        if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
            if Some(did) == tcx.lang_items().drop_in_place_fn() {
                return false;
            }
        }
    }

    // Otherwise, if this isn't special, then unwinding is generally determined
    // by the ABI of the function itself. ABIs like `C` have variants which also
    // specifically allow unwinding (`C-unwind`), but not all platform-specific
    // ABIs have such an option. Otherwise the only other thing here is Rust
    // itself, and those ABIs are determined by the panic strategy configured
    // for this compilation.
    //
    // Unfortunately at this time there's also another caveat. Rust [RFC
    // 2945][rfc] has been accepted and is in the process of being implemented
    // and stabilized. In this interim state we need to deal with historical
    // rustc behavior as well as plan for future rustc behavior.
    //
    // Historically functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
    // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared functions as well as Rust-defined functions.
    //
    // To fix this UB rustc would like to change in the future to catch unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function and forcibly abort the process, thereby respecting the
    // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
    // ready to roll out, so determining whether or not the `C` family of ABIs
    // unwinds is conditional not only on their definition but also whether the
    // `#![feature(c_unwind)]` feature gate is active.
    //
    // Note that this means that unlike historical compilers rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
    // prevent some optimization opportunities, however, so we try to scope this
    // change and only assume that `C` unwinds with `panic=unwind` (as opposed
    // to `panic=abort`).
    //
    // Eventually the check against `c_unwind` here will ideally get removed and
    // this'll be a little cleaner as it'll be a straightforward check of the
    // ABI.
    //
    // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
    use SpecAbi::*;
    match abi {
        C { unwind }
        | System { unwind }
        | Cdecl { unwind }
        | Stdcall { unwind }
        | Fastcall { unwind }
        | Vectorcall { unwind }
        | Thiscall { unwind }
        | Aapcs { unwind }
        | Win64 { unwind }
        | SysV64 { unwind } => {
            unwind
                || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
        }
        PtxKernel
        | Msp430Interrupt
        | X86Interrupt
        | AmdGpuKernel
        | EfiApi
        | AvrInterrupt
        | AvrNonBlockingInterrupt
        | CCmseNonSecureCall
        | Wasm
        | RustIntrinsic
        | PlatformIntrinsic
        | Unadjusted => false,
        Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
    }
}
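
// Illustrative sketch (hypothetical `unwind_tcx`/`abort_tcx` values for the
// two panic strategies; no `fn_def_id`): with `-Cpanic=unwind` and the
// `c_unwind` feature gate off, plain `extern "C"` is conservatively assumed
// to unwind, while `-Cpanic=abort` turns the Rust ABIs into non-unwinding
// ones:
//
//     assert!(fn_can_unwind(unwind_tcx, None, SpecAbi::Rust));
//     assert!(fn_can_unwind(unwind_tcx, None, SpecAbi::C { unwind: false }));
//     assert!(!fn_can_unwind(abort_tcx, None, SpecAbi::Rust));
//     assert!(fn_can_unwind(abort_tcx, None, SpecAbi::C { unwind: true }));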

#[inline]
pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
    use rustc_target::spec::abi::Abi::*;
    match tcx.sess.target.adjust_abi(abi) {
        RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
        RustCold => Conv::RustCold,

        // It's the ABI's job to select this, not ours.
        System { .. } => bug!("system abi should be selected elsewhere"),
        EfiApi => bug!("eficall abi should be selected elsewhere"),

        Stdcall { .. } => Conv::X86Stdcall,
        Fastcall { .. } => Conv::X86Fastcall,
        Vectorcall { .. } => Conv::X86VectorCall,
        Thiscall { .. } => Conv::X86ThisCall,
        C { .. } => Conv::C,
        Unadjusted => Conv::C,
        Win64 { .. } => Conv::X86_64Win64,
        SysV64 { .. } => Conv::X86_64SysV,
        Aapcs { .. } => Conv::ArmAapcs,
        CCmseNonSecureCall => Conv::CCmseNonSecureCall,
        PtxKernel => Conv::PtxKernel,
        Msp430Interrupt => Conv::Msp430Intr,
        X86Interrupt => Conv::X86Intr,
        AmdGpuKernel => Conv::AmdGpuKernel,
        AvrInterrupt => Conv::AvrInterrupt,
        AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
        Wasm => Conv::C,

        // These API constants ought to be more specific...
        Cdecl { .. } => Conv::C,
    }
}
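
// Illustrative (hypothetical `tcx`): the mapping above is mostly one-to-one,
// e.g.
//
//     assert!(matches!(conv_from_spec_abi(tcx, SpecAbi::Rust), Conv::Rust));
//     assert!(matches!(conv_from_spec_abi(tcx, SpecAbi::C { unwind: false }), Conv::C));
//
// modulo `Target::adjust_abi`, which lets the target remap an ABI first
// (e.g. the x86-only ABIs fall back to `C` on other architectures).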

/// Error produced by attempting to compute or adjust a `FnAbi`.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
    /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
    Layout(LayoutError<'tcx>),

    /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
    AdjustForForeignAbi(call::AdjustForForeignAbiError),
}

impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
    fn from(err: LayoutError<'tcx>) -> Self {
        Self::Layout(err)
    }
}

impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
    fn from(err: call::AdjustForForeignAbiError) -> Self {
        Self::AdjustForForeignAbi(err)
    }
}

impl<'tcx> fmt::Display for FnAbiError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Layout(err) => err.fmt(f),
            Self::AdjustForForeignAbi(err) => err.fmt(f),
        }
    }
}

impl<'tcx> IntoDiagnostic<'tcx, !> for FnAbiError<'tcx> {
    fn into_diagnostic(self, handler: &'tcx Handler) -> DiagnosticBuilder<'tcx, !> {
        handler.struct_fatal(self.to_string())
    }
}

// FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
// just for error handling.
#[derive(Debug)]
pub enum FnAbiRequest<'tcx> {
    OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
    OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
}

/// Trait for contexts that want to be able to compute `FnAbi`s.
/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
    /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
    /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
    type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;

    /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
    /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `&FnAbi` from its
    /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
    /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
}

/// Blanket extension trait for contexts that can compute `FnAbi`s.
pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    #[inline]
    fn fn_abi_of_fn_ptr(
        &self,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &'tcx ty::List<Ty<'tcx>>,
    ) -> Self::FnAbiOfResult {
        // FIXME(eddyb) get a better `span` here.
        let span = self.layout_tcx_at_span();
        let tcx = self.tcx().at(span);

        MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
            |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
        ))
    }

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    #[inline]
    #[tracing::instrument(level = "debug", skip(self))]
    fn fn_abi_of_instance(
        &self,
        instance: ty::Instance<'tcx>,
        extra_args: &'tcx ty::List<Ty<'tcx>>,
    ) -> Self::FnAbiOfResult {
        // FIXME(eddyb) get a better `span` here.
        let span = self.layout_tcx_at_span();
        let tcx = self.tcx().at(span);

        MaybeResult::from(
            tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
                // HACK(eddyb) at least for definitions of/calls to `Instance`s,
                // we can get some kind of span even if one wasn't provided.
                // However, we don't do this early in order to avoid calling
                // `def_span` unconditionally (which may have a perf penalty).
                let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
                self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
            }),
        )
    }
}

impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
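
// Illustrative sketch (hypothetical `cx` implementing `FnAbiOfHelpers`, plus
// a `tcx` and `def_id` in scope): computing the ABI for a direct call to a
// monomorphic instance. An empty `extra_args` list is the common case for
// everything except `extern "rust-call"`:
//
//     let instance = ty::Instance::mono(tcx, def_id);
//     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
//     // `fn_abi.ret` and `fn_abi.args` now carry a `PassMode` per value.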

fn fn_abi_of_fn_ptr<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
    let (param_env, (sig, extra_args)) = query.into_parts();

    LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
}

fn fn_abi_of_instance<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
    let (param_env, (instance, extra_args)) = query.into_parts();

    let sig = instance.fn_sig_for_fn_abi(tcx, param_env);

    let caller_location = if instance.def.requires_caller_location(tcx) {
        Some(tcx.caller_location_ty())
    } else {
        None
    };

    LayoutCx { tcx, param_env }.fn_abi_new_uncached(
        sig,
        extra_args,
        caller_location,
        Some(instance.def_id()),
        matches!(instance.def, ty::InstanceDef::Virtual(..)),
    )
}

// Handle safe Rust thin and fat pointers.
pub fn adjust_for_rust_scalar<'tcx>(
    cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
    attrs: &mut ArgAttributes,
    scalar: Scalar,
    layout: TyAndLayout<'tcx>,
    offset: Size,
    is_return: bool,
) {
    // Booleans are always a noundef i1 that needs to be zero-extended.
    if scalar.is_bool() {
        attrs.ext(ArgExtension::Zext);
        attrs.set(ArgAttribute::NoUndef);
        return;
    }

    // Scalars which have invalid values cannot be undef.
    if !scalar.is_always_valid(&cx) {
        attrs.set(ArgAttribute::NoUndef);
    }

    // Only pointer types are handled below.
    let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };

    if !valid_range.contains(0) {
        attrs.set(ArgAttribute::NonNull);
    }

    if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
        if let Some(kind) = pointee.safe {
            attrs.pointee_align = Some(pointee.align);

            // `Box` (`UniqueOwned`) is not necessarily dereferenceable
            // for the entire duration of the function as it can be deallocated
            // at any time. Same for shared mutable references. If LLVM had a
            // way to say "dereferenceable on entry" we could use it here.
            attrs.pointee_size = match kind {
                PointerKind::UniqueBorrowed
                | PointerKind::UniqueBorrowedPinned
                | PointerKind::Frozen => pointee.size,
                PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
            };

            // `Box`, `&T`, and `&mut T` cannot be undef.
            // Note that this only applies to the value of the pointer itself;
            // this attribute doesn't make it UB for the pointed-to data to be undef.
            attrs.set(ArgAttribute::NoUndef);

            // The aliasing rules for `Box<T>` are still not decided, but currently we emit
            // `noalias` for it. This can be turned off using an unstable flag.
            // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
            let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);

            // `&mut` pointer parameters never alias other parameters,
            // or mutable global data.
            //
            // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
            // and can be marked as both `readonly` and `noalias`, as
            // LLVM's definition of `noalias` is based solely on memory
            // dependencies rather than pointer equality.
            //
            // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
            // for UniqueBorrowed arguments, so that the codegen backend can decide whether
            // or not to actually emit the attribute. It can also be controlled with the
            // `-Zmutable-noalias` debugging option.
            let no_alias = match kind {
                PointerKind::SharedMutable
                | PointerKind::UniqueBorrowed
                | PointerKind::UniqueBorrowedPinned => false,
                PointerKind::UniqueOwned => noalias_for_box,
                PointerKind::Frozen => !is_return,
            };
            if no_alias {
                attrs.set(ArgAttribute::NoAlias);
            }

            if kind == PointerKind::Frozen && !is_return {
                attrs.set(ArgAttribute::ReadOnly);
            }

            if kind == PointerKind::UniqueBorrowed && !is_return {
                attrs.set(ArgAttribute::NoAliasMutRef);
            }
        }
    }
}
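
// Hedged illustration of `adjust_for_rust_scalar` (the exact outcome depends
// on optimization level and unstable flags): for a `&u64` argument at
// opt-level >= 1, the pointer scalar typically ends up `NonNull` + `NoUndef`
// + `NoAlias` + `ReadOnly`, with `pointee_size` and `pointee_align` of 8.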

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method, into a separate `struct`.
    #[tracing::instrument(
        level = "debug",
        skip(self, caller_location, fn_def_id, force_thin_self_ptr)
    )]
    fn fn_abi_new_uncached(
        &self,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        fn_def_id: Option<DefId>,
        // FIXME(eddyb) replace this with something typed, like an `enum`.
        force_thin_self_ptr: bool,
    ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
        let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);

        let conv = conv_from_spec_abi(self.tcx(), sig.abi);

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                            is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                        is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args
        };

        let target = &self.tcx.sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        use SpecAbi::*;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
            let span = tracing::debug_span!("arg_of");
            let _entered = span.enter();
            let is_return = arg_idx.is_none();

            let layout = self.layout_of(ty)?;
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
                make_thin_self_ptr(self, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            Ok(arg)
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None)?,
            args: inputs
                .iter()
                .copied()
                .chain(extra_args.iter().copied())
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect::<Result<_, _>>()?,
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len() as u32,
            conv,
            can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
        };
        self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
        debug!("fn_abi_new_uncached = {:?}", fn_abi);
        Ok(self.tcx.arena.alloc(fn_abi))
    }

    #[tracing::instrument(level = "trace", skip(self))]
    fn fn_abi_adjust_for_abi(
        &self,
        fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
        abi: SpecAbi,
    ) -> Result<(), FnAbiError<'tcx>> {
        if abi == SpecAbi::Unadjusted {
            return Ok(());
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && self.tcx.sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(self) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
            fixup(&mut fn_abi.ret);
            for arg in fn_abi.args.iter_mut() {
                fixup(arg);
            }
        } else {
            fn_abi.adjust_for_foreign_abi(self, abi)?;
        }

        Ok(())
    }
}

#[tracing::instrument(level = "debug", skip(cx))]
fn make_thin_self_ptr<'tcx>(
    cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
    layout: TyAndLayout<'tcx>,
) -> TyAndLayout<'tcx> {
    let tcx = cx.tcx();
    let fat_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        tcx.mk_mut_ptr(layout.ty)
    } else {
        match layout.abi {
            Abi::ScalarPair(..) | Abi::Scalar(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),
        }

        // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type
        let mut fat_pointer_layout = layout;
        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
            && !fat_pointer_layout.ty.is_region_ptr()
        {
            for i in 0..fat_pointer_layout.fields.count() {
                let field_layout = fat_pointer_layout.field(cx, i);

                if !field_layout.is_zst() {
                    fat_pointer_layout = field_layout;
                    continue 'descend_newtypes;
                }
            }

            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
        }

        fat_pointer_layout.ty
    };

    // we now have a type like `*mut RcBox<dyn Trait>`
    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
    // this is understood as a special case elsewhere in the compiler
    let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());

    TyAndLayout {
        ty: fat_pointer_ty,

        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
        // should always work because the type is always `*mut ()`.
        ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
    }
}
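
// Hedged illustration of `make_thin_self_ptr` (types are conceptual): for a
// `self: Rc<Self>` receiver on a `dyn Trait` method, the loop above descends
// through `Rc<Self>`'s non-ZST fields to the raw `*mut RcBox<Self>` inside
// `NonNull`, then returns that type paired with the layout of `*mut ()`, so
// codegen passes only the data pointer.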