// compiler/rustc_middle/src/ty/layout.rs
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{
    self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
    TyCtxt, TypeVisitable,
};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp::{self, Ordering};
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }

    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
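    ///
    /// Illustrative examples (not from the original source): a discriminant
    /// range of `0..=100` with no `#[repr]` hint yields `(I8, false)`, while
    /// `-1..=100` yields `(I8, true)`; adding `#[repr(C)]` widens the result
    /// to at least the target's `c_enum_min_size` (commonly `I32`).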
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none.
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible.
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
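
// Illustrative (not in the original source): for `&[u8]`, field `FAT_PTR_ADDR`
// of the scalar pair is the data pointer and `FAT_PTR_EXTRA` is the `usize`
// length; for `&dyn Trait`, the second field is the vtable pointer instead.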

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = cx.layout_of_uncached(ty)?;
    let layout = TyAndLayout { ty, layout };

    cx.record_layout_for_printing(layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
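
// Usage note (illustrative): callers reach this function through the query
// system rather than directly, e.g. `tcx.layout_of(param_env.and(ty))`, which
// caches results per `(param_env, ty)` pair, as the recursive call above shows.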

#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
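// For example (illustrative): `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`,
// and inverting that again recovers the original `[2, 0, 1]`.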
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate over `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
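
    // Illustrative (assuming a 64-bit data layout, not in the original source):
    // pairing a pointer scalar with a `usize` scalar, as done for `&[T]` below,
    // places the pointer at offset 0 and the length at `b_offset` = 8, for a
    // total size of 16 with alignment 8.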

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try to catch code that makes assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions::field_shuffle_seed` is a deterministic seed we can use
                // to randomize the field ordering.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields.
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields.
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
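
        // Illustrative example (not in the original source): for
        // `struct S { a: u8, b: u32, c: u16 }`, sorting by decreasing alignment
        // yields memory order (b, c, a) and a total size of 8 bytes, whereas
        // declaration order (as with `#[repr(C)]`) needs padding after `a` and
        // at the end, giving 12 bytes.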

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        i, ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then
        // inverse_memory_index[0] is 5, and memory_index[5] should be 0, i.e.
        // each entry maps a source field index back to its rank in memory.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }
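
        // Illustrative (not in the original source): this is where
        // `struct Newtype(u64)` inherits `Abi::Scalar` from its single field,
        // and a struct whose two non-ZST fields are scalars (e.g. two `u32`s)
        // becomes `Abi::ScalarPair`, so both can be passed in registers.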

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
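
            // Illustrative (not in the original source): `&str` takes this
            // path with `ty::Str` metadata, producing a scalar pair of a
            // non-null pointer and a `usize` length; `&dyn Trait` instead
            // pairs the data pointer with a non-null vtable pointer.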

            ty::Dynamic(_, _, ty::DynStar) => {
                let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
                data.valid_range_mut().start = 0;
                let mut vtable = scalar_unit(Pointer);
                vtable.valid_range_mut().start = 1;
                tcx.intern_layout(self.scalar_pair(data, vtable))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
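            // Illustrative (not in the original source): `[u16; 4]` yields
            // `FieldsShape::Array { stride: 2, count: 4 }`, giving size 8 with
            // the element's alignment of 2; the slice and str arms below reuse
            // the same shape with `count: 0` and size 0, since they are unsized.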
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4]);
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                //   the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15), a fixed
                // maximum that backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef.
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }
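
                    // Illustrative (not in the original source): for
                    // `union U { a: u32, b: u32 }` both fields forward the
                    // same `Abi::Scalar`, so the union stays scalar; for
                    // `union U { a: u32, b: u64 }` the ABIs differ and the
                    // layout falls back to `Abi::Aggregate`.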

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can
                    // compute the field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };

                    if def.is_unsafe_cell() {
                        let hide_niches = |scalar: &mut _| match scalar {
                            Scalar::Initialized { value, valid_range } => {
                                *valid_range = WrappingRange::full(value.size(dl))
                            }
                            // Already doesn't have any niches.
                            Scalar::Union { .. } => {}
                        };
                        match &mut st.abi {
                            Abi::Uninhabited => {}
                            Abi::Scalar(scalar) => hide_niches(scalar),
                            Abi::ScalarPair(a, b) => {
                                hide_niches(a);
                                hide_niches(b);
                            }
                            Abi::Vector { element, count: _ } => hide_niches(element),
                            Abi::Aggregate { sized: _ } => {}
                        }
                        st.largest_niche = None;
                        return Ok(tcx.intern_layout(st));
                    }

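                    // Illustrative (not in the original source): `NonZeroU32`
                    // is defined with `#[rustc_layout_scalar_valid_range_start(1)]`,
                    // so the code below narrows its scalar's valid range to
                    // `1..=u32::MAX`, creating the niche that makes
                    // `Option<NonZeroU32>` the same size as `u32`.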
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // Until we've decided whether to use the tagged or
                // niche-filling `LayoutS`, we don't want to intern the
                // variant layouts, so we can't store them in the
                // overall `LayoutS`. Store the overall `LayoutS`
                // and the variant `LayoutS`s here until then.
                struct TmpLayout<'tcx> {
                    layout: LayoutS<'tcx>,
                    variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
                }
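
                // Illustrative (not in the original source): the classic
                // niche-filling result is `Option<&T>`, where the reference's
                // non-null niche lets `None` be encoded as the all-zero bit
                // pattern, keeping the enum pointer-sized with no separate tag.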
1066
1067                 let calculate_niche_filling_layout =
1068                     || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1069                         // The current code for niche-filling relies on variant indices
1070                         // instead of actual discriminants, so enums with
1071                         // explicit discriminants (RFC #2363) would misbehave.
1072                         if def.repr().inhibit_enum_layout_opt()
1073                             || def
1074                                 .variants()
1075                                 .iter_enumerated()
1076                                 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
1077                         {
1078                             return Ok(None);
1079                         }
1080
1081                         if variants.len() < 2 {
1082                             return Ok(None);
1083                         }
1084
1085                         let mut align = dl.aggregate_align;
1086                         let mut variant_layouts = variants
1087                             .iter_enumerated()
1088                             .map(|(j, v)| {
1089                                 let mut st = self.univariant_uninterned(
1090                                     ty,
1091                                     v,
1092                                     &def.repr(),
1093                                     StructKind::AlwaysSized,
1094                                 )?;
1095                                 st.variants = Variants::Single { index: j };
1096
1097                                 align = align.max(st.align);
1098
1099                                 Ok(st)
1100                             })
1101                             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1102
1103                         let largest_variant_index = match variant_layouts
1104                             .iter_enumerated()
1105                             .max_by_key(|(_i, layout)| layout.size.bytes())
1106                             .map(|(i, _layout)| i)
1107                         {
1108                             None => return Ok(None),
1109                             Some(i) => i,
1110                         };
1111
1112                         let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1113                         let needs_disc = |index: VariantIdx| {
1114                             index != largest_variant_index && !absent(&variants[index])
1115                         };
1116                         let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1117                             ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1118
1119                         let count = niche_variants.size_hint().1.unwrap() as u128;
1120
1121                         // Find the field with the largest niche
1122                         let (field_index, niche, (niche_start, niche_scalar)) = match variants
1123                             [largest_variant_index]
1124                             .iter()
1125                             .enumerate()
1126                             .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1127                             .max_by_key(|(_, niche)| niche.available(dl))
1128                             .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1129                         {
1130                             None => return Ok(None),
1131                             Some(x) => x,
1132                         };
1133
1134                         let niche_offset = niche.offset
1135                             + variant_layouts[largest_variant_index].fields.offset(field_index);
1136                         let niche_size = niche.value.size(dl);
1137                         let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1138
1139                         let all_variants_fit =
1140                             variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1141                                 if i == largest_variant_index {
1142                                     return true;
1143                                 }
1144
1145                                 layout.largest_niche = None;
1146
1147                                 if layout.size <= niche_offset {
1148                                     // This variant will fit before the niche.
1149                                     return true;
1150                                 }
1151
1152                                 // Determine if it'll fit after the niche.
1153                                 let this_align = layout.align.abi;
1154                                 let this_offset = (niche_offset + niche_size).align_to(this_align);
1155
1156                                 if this_offset + layout.size > size {
1157                                     return false;
1158                                 }
1159
1160                                 // It'll fit, but we need to make some adjustments.
1161                                 match layout.fields {
1162                                     FieldsShape::Arbitrary { ref mut offsets, .. } => {
1163                                         for (j, offset) in offsets.iter_mut().enumerate() {
1164                                             if !variants[i][j].is_zst() {
1165                                                 *offset += this_offset;
1166                                             }
1167                                         }
1168                                     }
1169                                     _ => {
1170                                         panic!("Layout of fields should be Arbitrary for variants")
1171                                     }
1172                                 }
1173
1174                                 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1175                                 if !layout.abi.is_uninhabited() {
1176                                     layout.abi = Abi::Aggregate { sized: true };
1177                                 }
1178                                 layout.size += this_offset;
1179
1180                                 true
1181                             });
1182
1183                         if !all_variants_fit {
1184                             return Ok(None);
1185                         }
1186
1187                         let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1188
1189                         let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1190                             i == largest_variant_index || layout.size == Size::ZERO
1191                         });
1192                         let same_size = size == variant_layouts[largest_variant_index].size;
1193                         let same_align = align == variant_layouts[largest_variant_index].align;
1194
1195                         let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1196                             Abi::Uninhabited
1197                         } else if same_size && same_align && others_zst {
1198                             match variant_layouts[largest_variant_index].abi {
1199                                 // When the total alignment and size match, we can use the
1200                                 // same ABI as the scalar variant with the reserved niche.
1201                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1202                                 Abi::ScalarPair(first, second) => {
1203                                     // Only the niche is guaranteed to be initialised,
1204                                     // so use union layouts for the other primitive.
1205                                     if niche_offset == Size::ZERO {
1206                                         Abi::ScalarPair(niche_scalar, second.to_union())
1207                                     } else {
1208                                         Abi::ScalarPair(first.to_union(), niche_scalar)
1209                                     }
1210                                 }
1211                                 _ => Abi::Aggregate { sized: true },
1212                             }
1213                         } else {
1214                             Abi::Aggregate { sized: true }
1215                         };
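                             // Illustrative example (not from this file): for
                             // `Option<core::num::NonZeroU32>`, the `Some` variant is a
                             // `Scalar` whose niche is the value 0 and `None` is a ZST, so
                             // `same_size && same_align && others_zst` holds and the enum
                             // keeps `Abi::Scalar`, staying 4 bytes with no separate tag.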
1216
1217                         let layout = LayoutS {
1218                             variants: Variants::Multiple {
1219                                 tag: niche_scalar,
1220                                 tag_encoding: TagEncoding::Niche {
1221                                     untagged_variant: largest_variant_index,
1222                                     niche_variants,
1223                                     niche_start,
1224                                 },
1225                                 tag_field: 0,
1226                                 variants: IndexVec::new(),
1227                             },
1228                             fields: FieldsShape::Arbitrary {
1229                                 offsets: vec![niche_offset],
1230                                 memory_index: vec![0],
1231                             },
1232                             abi,
1233                             largest_niche,
1234                             size,
1235                             align,
1236                         };
1237
1238                         Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1239                     };
1240
1241                 let niche_filling_layout = calculate_niche_filling_layout()?;
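                     // Illustrative example (not from this file): given
                     //
                     //     enum E { A, B(bool), C }
                     //
                     // the largest variant is `B`, whose `bool` field has a niche
                     // spanning the unused values 2..=255. Two of those values are
                     // reserved to encode `A` and `C`, both ZST variants trivially
                     // fit around the niche, and the whole enum is a single byte.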
1242
1243                 let (mut min, mut max) = (i128::MAX, i128::MIN);
1244                 let discr_type = def.repr().discr_type();
1245                 let bits = Integer::from_attr(self, discr_type).size().bits();
1246                 for (i, discr) in def.discriminants(tcx) {
1247                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1248                         continue;
1249                     }
1250                     let mut x = discr.val as i128;
1251                     if discr_type.is_signed() {
1252                         // sign extend the raw representation to be an i128
1253                         x = (x << (128 - bits)) >> (128 - bits);
1254                     }
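                         // Worked example of the sign extension above: for an `i8`
                         // discriminant of -1, `discr.val` is 0xff, so `bits` is 8
                         // and `x` starts out as 255. Shifting left by 120 moves
                         // 0xff into the top byte of the i128, and the arithmetic
                         // shift right by 120 smears the sign bit back down,
                         // yielding -1.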
1255                     if x < min {
1256                         min = x;
1257                     }
1258                     if x > max {
1259                         max = x;
1260                     }
1261                 }
1262                 // We might have no inhabited variants, so pretend there's at least one.
1263                 if (min, max) == (i128::MAX, i128::MIN) {
1264                     min = 0;
1265                     max = 0;
1266                 }
1267                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1268                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1269
1270                 let mut align = dl.aggregate_align;
1271                 let mut size = Size::ZERO;
1272
1273                 // We're interested in the smallest alignment, so start large.
1274                 let mut start_align = Align::from_bytes(256).unwrap();
1275                 assert_eq!(Integer::for_align(dl, start_align), None);
1276
1277                 // repr(C) on an enum tells us to make a (tag, union) layout,
1278                 // so we need to grow the prefix alignment to be at least
1279                 // the alignment of the union. (This value is used both for
1280                 // determining the alignment of the overall enum, and for
1281                 // determining the alignment of the payload after the tag.)
1282                 let mut prefix_align = min_ity.align(dl).abi;
1283                 if def.repr().c() {
1284                     for fields in &variants {
1285                         for field in fields {
1286                             prefix_align = prefix_align.max(field.align.abi);
1287                         }
1288                     }
1289                 }
1290
1291                 // Create the set of structs that represent each variant.
1292                 let mut layout_variants = variants
1293                     .iter_enumerated()
1294                     .map(|(i, field_layouts)| {
1295                         let mut st = self.univariant_uninterned(
1296                             ty,
1297                             &field_layouts,
1298                             &def.repr(),
1299                             StructKind::Prefixed(min_ity.size(), prefix_align),
1300                         )?;
1301                         st.variants = Variants::Single { index: i };
1302                         // Find the first field we can't move later
1303                         // to make room for a larger discriminant.
1304                         for field in
1305                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1306                         {
1307                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1308                                 start_align = start_align.min(field.align.abi);
1309                                 break;
1310                             }
1311                         }
1312                         size = cmp::max(size, st.size);
1313                         align = align.max(st.align);
1314                         Ok(st)
1315                     })
1316                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1317
1318                 // Align the maximum variant size to the largest alignment.
1319                 size = size.align_to(align.abi);
1320
1321                 if size.bytes() >= dl.obj_size_bound() {
1322                     return Err(LayoutError::SizeOverflow(ty));
1323                 }
1324
1325                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1326                 if typeck_ity < min_ity {
1327                     // It is a bug if layout decided on a larger discriminant size than
1328                     // typeck did at this point, based on the values the discriminant can
1329                     // take on, because the discriminant will be loaded and then stored
1330                     // into a variable of the type computed by typeck. Consider this buggy
1331                     // case: typeck decided on a byte-sized discriminant, but layout thinks
1332                     // 16 bits are needed to store all the discriminant values. Then, in
1333                     // codegen, storing that 16-bit discriminant into an 8-bit temporary
1334                     // would have to discard some of the bits needed to represent it (or
1335                     // layout is wrong in thinking it needs 16 bits).
1336                     bug!(
1337                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1338                         min_ity,
1339                         typeck_ity
1340                     );
1341                     // However, it is fine to make the discr type arbitrarily large (as an
1342                     // optimisation) after this point – we’ll just truncate the value we load in codegen.
1343                 }
1344
1345                 // Check to see if we should use a different type for the
1346                 // discriminant. We can safely use a type with the same size
1347                 // as the alignment of the first field of each variant.
1348                 // We increase the size of the discriminant to avoid LLVM copying
1349                 // padding when it doesn't need to. Copying the padding normally causes
1350                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1351                 // bigger integer size, LLVM can be sure about its contents and
1352                 // won't be so conservative.
1353
1354                 // Use the initial field alignment to pick the discriminant type.
1355                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1356                     min_ity
1357                 } else {
1358                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1359                 };
1360
1361                 // If the alignment-derived integer is no larger than the minimal
1362                 // discriminant size, don't let the alignment inflate the final size.
1363                 if ity <= min_ity {
1364                     ity = min_ity;
1365                 } else {
1366                     // Patch up the variants' first few fields.
1367                     let old_ity_size = min_ity.size();
1368                     let new_ity_size = ity.size();
1369                     for variant in &mut layout_variants {
1370                         match variant.fields {
1371                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1372                                 for i in offsets {
1373                                     if *i <= old_ity_size {
1374                                         assert_eq!(*i, old_ity_size);
1375                                         *i = new_ity_size;
1376                                     }
1377                                 }
1378                                 // We might be making the struct larger.
1379                                 if variant.size <= old_ity_size {
1380                                     variant.size = new_ity_size;
1381                                 }
1382                             }
1383                             _ => bug!(),
1384                         }
1385                     }
1386                 }
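                     // Illustrative sketch of the patch-up above: if the tag grows
                     // from `I8` to `I32`, every field that sat directly after the
                     // 1-byte tag (offset 1) moves to offset 4, and any variant
                     // smaller than the new tag is grown to at least 4 bytes.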
1387
1388                 let tag_mask = ity.size().unsigned_int_max();
1389                 let tag = Scalar::Initialized {
1390                     value: Int(ity, signed),
1391                     valid_range: WrappingRange {
1392                         start: (min as u128 & tag_mask),
1393                         end: (max as u128 & tag_mask),
1394                     },
1395                 };
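                     // Illustrative example (not from this file): for
                     //
                     //     #[repr(i8)] enum E { A = -1, B = 0 }
                     //
                     // `min`/`max` are -1/0 and `tag_mask` is 0xff, so the valid
                     // range is the wrapping range 0xff..=0x00 (`start > end`),
                     // which is exactly what `WrappingRange` can express.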
1396                 let mut abi = Abi::Aggregate { sized: true };
1397
1398                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1399                     abi = Abi::Uninhabited;
1400                 } else if tag.size(dl) == size {
1401                     // Make sure we only use scalar layout when the enum is entirely its
1402                     // own tag (i.e. it has no padding nor any non-ZST variant fields).
1403                     abi = Abi::Scalar(tag);
1404                 } else {
1405                     // Try to use a ScalarPair for all tagged enums.
1406                     let mut common_prim = None;
1407                     let mut common_prim_initialized_in_all_variants = true;
1408                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1409                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1410                             bug!();
1411                         };
1412                         let mut fields =
1413                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1414                         let (field, offset) = match (fields.next(), fields.next()) {
1415                             (None, None) => {
1416                                 common_prim_initialized_in_all_variants = false;
1417                                 continue;
1418                             }
1419                             (Some(pair), None) => pair,
1420                             _ => {
1421                                 common_prim = None;
1422                                 break;
1423                             }
1424                         };
1425                         let prim = match field.abi {
1426                             Abi::Scalar(scalar) => {
1427                                 common_prim_initialized_in_all_variants &=
1428                                     matches!(scalar, Scalar::Initialized { .. });
1429                                 scalar.primitive()
1430                             }
1431                             _ => {
1432                                 common_prim = None;
1433                                 break;
1434                             }
1435                         };
1436                         if let Some(pair) = common_prim {
1437                             // This is pretty conservative. We could go fancier
1438                             // by conflating things like i32 and u32, or even
1439                             // realising that (u8, u8) could just cohabit with
1440                             // u16 or even u32.
1441                             if pair != (prim, offset) {
1442                                 common_prim = None;
1443                                 break;
1444                             }
1445                         } else {
1446                             common_prim = Some((prim, offset));
1447                         }
1448                     }
1449                     if let Some((prim, offset)) = common_prim {
1450                         let prim_scalar = if common_prim_initialized_in_all_variants {
1451                             scalar_unit(prim)
1452                         } else {
1453                             // Common prim might be uninit.
1454                             Scalar::Union { value: prim }
1455                         };
1456                         let pair = self.scalar_pair(tag, prim_scalar);
1457                         let pair_offsets = match pair.fields {
1458                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1459                                 assert_eq!(memory_index, &[0, 1]);
1460                                 offsets
1461                             }
1462                             _ => bug!(),
1463                         };
1464                         if pair_offsets[0] == Size::ZERO
1465                             && pair_offsets[1] == *offset
1466                             && align == pair.align
1467                             && size == pair.size
1468                         {
1469                             // We can use `ScalarPair` only when it matches our
1470                             // already computed layout (including `#[repr(C)]`).
1471                             abi = pair.abi;
1472                         }
1473                     }
1474                 }
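                     // Illustrative example (not from this file): `Result<u32, u32>`
                     // has exactly one non-ZST field per variant, both of primitive
                     // `u32` at the same offset, so `common_prim` survives the loop
                     // above and the enum gets a `ScalarPair(tag, u32)` ABI instead
                     // of `Aggregate`.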
1475
1476                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1477                 // variants to ensure they are consistent. This is because a downcast is
1478                 // semantically a NOP, and thus should not affect layout.
1479                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1480                     for variant in &mut layout_variants {
1481                         // We only do this for variants with fields; the others are not accessed anyway.
1482                         // Also do not overwrite any already existing "clever" ABIs.
1483                         if variant.fields.count() > 0
1484                             && matches!(variant.abi, Abi::Aggregate { .. })
1485                         {
1486                             variant.abi = abi;
1487                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1488                             variant.size = cmp::max(variant.size, size);
1489                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1490                         }
1491                     }
1492                 }
1493
1494                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1495
1496                 let tagged_layout = LayoutS {
1497                     variants: Variants::Multiple {
1498                         tag,
1499                         tag_encoding: TagEncoding::Direct,
1500                         tag_field: 0,
1501                         variants: IndexVec::new(),
1502                     },
1503                     fields: FieldsShape::Arbitrary {
1504                         offsets: vec![Size::ZERO],
1505                         memory_index: vec![0],
1506                     },
1507                     largest_niche,
1508                     abi,
1509                     align,
1510                     size,
1511                 };
1512
1513                 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1514
1515                 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1516                     (tl, Some(nl)) => {
1517                         // Pick the smaller layout; otherwise,
1518                         // pick the layout with the larger niche; otherwise,
1519                         // pick tagged as it has simpler codegen.
1520                         use Ordering::*;
1521                         let niche_size = |tmp_l: &TmpLayout<'_>| {
1522                             tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1523                         };
1524                         match (
1525                             tl.layout.size.cmp(&nl.layout.size),
1526                             niche_size(&tl).cmp(&niche_size(&nl)),
1527                         ) {
1528                             (Greater, _) => nl,
1529                             (Equal, Less) => nl,
1530                             _ => tl,
1531                         }
1532                     }
1533                     (tl, None) => tl,
1534                 };
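                     // Illustrative example (not from this file): for
                     // `Option<core::num::NonZeroU32>`, the niche-filling candidate
                     // is 4 bytes while the tagged candidate is 8, so the size
                     // comparison above picks the niche layout.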
1535
1536                 // Now we can intern the variant layouts and store them in the enum layout.
1537                 best_layout.layout.variants = match best_layout.layout.variants {
1538                     Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1539                         tag,
1540                         tag_encoding,
1541                         tag_field,
1542                         variants: best_layout
1543                             .variants
1544                             .into_iter()
1545                             .map(|layout| tcx.intern_layout(layout))
1546                             .collect(),
1547                     },
1548                     _ => bug!(),
1549                 };
1550
1551                 tcx.intern_layout(best_layout.layout)
1552             }
1553
1554             // Types with no meaningful known layout.
1555             ty::Projection(_) | ty::Opaque(..) => {
1556                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1557                 // if that was possible, so there's no reason to try again here.
1558                 return Err(LayoutError::Unknown(ty));
1559             }
1560
1561             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1562                 bug!("Layout::compute: unexpected type `{}`", ty)
1563             }
1564
1565             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1566                 return Err(LayoutError::Unknown(ty));
1567             }
1568         })
1569     }
1570 }
1571
1572 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1573 #[derive(Clone, Debug, PartialEq)]
1574 enum SavedLocalEligibility {
1575     Unassigned,
1576     Assigned(VariantIdx),
1577     // FIXME: Use newtype_index so we aren't wasting bytes
1578     Ineligible(Option<u32>),
1579 }
1580
1581 // When laying out generators, we divide our saved local fields into two
1582 // categories: overlap-eligible and overlap-ineligible.
1583 //
1584 // Those fields which are ineligible for overlap go in a "prefix" at the
1585 // beginning of the layout, and always have space reserved for them.
1586 //
1587 // Overlap-eligible fields are only assigned to one variant, so we lay
1588 // those fields out for each variant and put them right after the
1589 // prefix.
1590 //
1591 // Finally, in the layout details, we point to the fields from the
1592 // variants they are assigned to. It is possible for some fields to be
1593 // included in multiple variants. No field ever "moves around" in the
1594 // layout; its offset is always the same.
1595 //
1596 // Also included in the layout are the upvars and the discriminant.
1597 // These are included as fields on the "outer" layout; they are not part
1598 // of any variant.
1599 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1600     /// Compute the eligibility and assignment of each local.
1601     fn generator_saved_local_eligibility(
1602         &self,
1603         info: &GeneratorLayout<'tcx>,
1604     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1605         use SavedLocalEligibility::*;
1606
1607         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1608             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1609
1610         // The saved locals not eligible for overlap. These will get
1611         // "promoted" to the prefix of our generator.
1612         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1613
1614         // Figure out which of our saved locals are fields in only
1615         // one variant. The rest are deemed ineligible for overlap.
1616         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1617             for local in fields {
1618                 match assignments[*local] {
1619                     Unassigned => {
1620                         assignments[*local] = Assigned(variant_index);
1621                     }
1622                     Assigned(idx) => {
1623                         // We've already seen this local at another suspension
1624                         // point, so it is no longer a candidate.
1625                         trace!(
1626                             "removing local {:?} in >1 variant ({:?}, {:?})",
1627                             local,
1628                             variant_index,
1629                             idx
1630                         );
1631                         ineligible_locals.insert(*local);
1632                         assignments[*local] = Ineligible(None);
1633                     }
1634                     Ineligible(_) => {}
1635                 }
1636             }
1637         }
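             // Illustrative example (not from this file): a local that is kept
             // alive across two different `yield` points appears in the fields
             // of two variants, so the `Assigned` arm above demotes it to
             // `Ineligible` and it ends up in the shared prefix instead.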
1638
1639         // Next, check every pair of eligible locals to see if they
1640         // conflict.
1641         for local_a in info.storage_conflicts.rows() {
1642             let conflicts_a = info.storage_conflicts.count(local_a);
1643             if ineligible_locals.contains(local_a) {
1644                 continue;
1645             }
1646
1647             for local_b in info.storage_conflicts.iter(local_a) {
1648                 // local_a and local_b are storage-live at the same time, so they
1649                 // cannot overlap in the generator layout. The only way to guarantee
1650                 // this is if they are in the same variant, or one is ineligible
1651                 // (which means it is stored in every variant).
1652                 if ineligible_locals.contains(local_b)
1653                     || assignments[local_a] == assignments[local_b]
1654                 {
1655                     continue;
1656                 }
1657
1658                 // If they conflict, we will choose one to make ineligible.
1659                 // This is not always optimal; it's just a greedy heuristic that
1660                 // seems to produce good results most of the time.
1661                 let conflicts_b = info.storage_conflicts.count(local_b);
1662                 let (remove, other) =
1663                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1664                 ineligible_locals.insert(remove);
1665                 assignments[remove] = Ineligible(None);
1666                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1667             }
1668         }
1669
1670         // Count the number of variants in use. If only one variant is used, it is
1671         // impossible to overlap any locals in our layout. In this case it's
1672         // always better to make the remaining locals ineligible, so we can
1673         // lay them out with the other locals in the prefix and eliminate
1674         // unnecessary padding bytes.
1675         {
1676             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1677             for assignment in &assignments {
1678                 if let Assigned(idx) = assignment {
1679                     used_variants.insert(*idx);
1680                 }
1681             }
1682             if used_variants.count() < 2 {
1683                 for assignment in assignments.iter_mut() {
1684                     *assignment = Ineligible(None);
1685                 }
1686                 ineligible_locals.insert_all();
1687             }
1688         }
1689
1690         // Write down the order of our locals that will be promoted to the prefix.
1691         {
1692             for (idx, local) in ineligible_locals.iter().enumerate() {
1693                 assignments[local] = Ineligible(Some(idx as u32));
1694             }
1695         }
1696         debug!("generator saved local assignments: {:?}", assignments);
1697
1698         (ineligible_locals, assignments)
1699     }
1700
1701     /// Compute the full generator layout.
1702     fn generator_layout(
1703         &self,
1704         ty: Ty<'tcx>,
1705         def_id: hir::def_id::DefId,
1706         substs: SubstsRef<'tcx>,
1707     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1708         use SavedLocalEligibility::*;
1709         let tcx = self.tcx;
1710         let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1711
1712         let Some(info) = tcx.generator_layout(def_id) else {
1713             return Err(LayoutError::Unknown(ty));
1714         };
1715         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1716
1717         // Build a prefix layout, including "promoting" all ineligible
1718         // locals as part of the prefix. We compute the layout of all of
1719         // these fields at once to get optimal packing.
1720         let tag_index = substs.as_generator().prefix_tys().count();
1721
1722         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1723         let max_discr = (info.variant_fields.len() - 1) as u128;
1724         let discr_int = Integer::fit_unsigned(max_discr);
1725         let discr_int_ty = discr_int.to_ty(tcx, false);
1726         let tag = Scalar::Initialized {
1727             value: Primitive::Int(discr_int, false),
1728             valid_range: WrappingRange { start: 0, end: max_discr },
1729         };
1730         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1731         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
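             // Illustrative example (not from this file): a generator with two
             // `yield`s has the three reserved variants (unresumed, returned,
             // poisoned) plus one suspend variant per yield, so
             // `info.variant_fields.len()` is 5, `max_discr` is 4, and the tag
             // is an unsigned byte with valid range 0..=4.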
1732
1733         let promoted_layouts = ineligible_locals
1734             .iter()
1735             .map(|local| subst_field(info.field_tys[local]))
1736             .map(|ty| tcx.mk_maybe_uninit(ty))
1737             .map(|ty| self.layout_of(ty));
1738         let prefix_layouts = substs
1739             .as_generator()
1740             .prefix_tys()
1741             .map(|ty| self.layout_of(ty))
1742             .chain(iter::once(Ok(tag_layout)))
1743             .chain(promoted_layouts)
1744             .collect::<Result<Vec<_>, _>>()?;
1745         let prefix = self.univariant_uninterned(
1746             ty,
1747             &prefix_layouts,
1748             &ReprOptions::default(),
1749             StructKind::AlwaysSized,
1750         )?;
1751
1752         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1753
1754         // Split the prefix layout into the "outer" fields (upvars and
1755         // discriminant) and the "promoted" fields. Promoted fields will
1756         // get included in each variant that requested them in
1757         // GeneratorLayout.
1758         debug!("prefix = {:#?}", prefix);
1759         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1760             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1761                 let mut inverse_memory_index = invert_mapping(&memory_index);
1762
1763                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1764                 // "outer" and "promoted" fields respectively.
1765                 let b_start = (tag_index + 1) as u32;
1766                 let offsets_b = offsets.split_off(b_start as usize);
1767                 let offsets_a = offsets;
1768
1769                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1770                 // by preserving the order but keeping only one disjoint "half" each.
1771                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1772                 let inverse_memory_index_b: Vec<_> =
1773                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1774                 inverse_memory_index.retain(|&i| i < b_start);
1775                 let inverse_memory_index_a = inverse_memory_index;
1776
1777                 // Since `inverse_memory_index_{a,b}` each only refer to their
1778                 // respective fields, they can be safely inverted.
1779                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1780                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1781
1782                 let outer_fields =
1783                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1784                 (outer_fields, offsets_b, memory_index_b)
1785             }
1786             _ => bug!(),
1787         };
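             // Worked example of the split above (illustrative): with
             // `memory_index = [2, 0, 3, 1]` and `b_start = 2`, the inverse is
             // `[1, 3, 0, 2]`. Filtering and rebasing yields
             // `inverse_memory_index_a = [1, 0]` and
             // `inverse_memory_index_b = [1, 0]`, which invert back to
             // `memory_index_a = [1, 0]` and `memory_index_b = [1, 0]`, each a
             // valid permutation of its own half.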
1788
1789         let mut size = prefix.size;
1790         let mut align = prefix.align;
1791         let variants = info
1792             .variant_fields
1793             .iter_enumerated()
1794             .map(|(index, variant_fields)| {
1795                 // Only include overlap-eligible fields when we compute our variant layout.
1796                 let variant_only_tys = variant_fields
1797                     .iter()
1798                     .filter(|local| match assignments[**local] {
1799                         Unassigned => bug!(),
1800                         Assigned(v) if v == index => true,
1801                         Assigned(_) => bug!("assignment does not match variant"),
1802                         Ineligible(_) => false,
1803                     })
1804                     .map(|local| subst_field(info.field_tys[*local]));
1805
1806                 let mut variant = self.univariant_uninterned(
1807                     ty,
1808                     &variant_only_tys
1809                         .map(|ty| self.layout_of(ty))
1810                         .collect::<Result<Vec<_>, _>>()?,
1811                     &ReprOptions::default(),
1812                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1813                 )?;
1814                 variant.variants = Variants::Single { index };
1815
1816                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1817                     bug!();
1818                 };
1819
1820                 // Now, stitch the promoted and variant-only fields back together in
1821                 // the order they are mentioned by our GeneratorLayout.
1822                 // Because we only use some subset (that can differ between variants)
1823                 // of the promoted fields, we can't just pick those elements of the
1824                 // `promoted_memory_index` (as we'd end up with gaps).
1825                 // So instead, we build an "inverse memory_index", as if all of the
1826                 // promoted fields were being used, but leave the elements not in the
1827                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1828                 // obtain a valid (bijective) mapping.
1829                 const INVALID_FIELD_IDX: u32 = !0;
1830                 let mut combined_inverse_memory_index =
1831                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1832                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1833                 let combined_offsets = variant_fields
1834                     .iter()
1835                     .enumerate()
1836                     .map(|(i, local)| {
1837                         let (offset, memory_index) = match assignments[*local] {
1838                             Unassigned => bug!(),
1839                             Assigned(_) => {
1840                                 let (offset, memory_index) =
1841                                     offsets_and_memory_index.next().unwrap();
1842                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1843                             }
1844                             Ineligible(field_idx) => {
1845                                 let field_idx = field_idx.unwrap() as usize;
1846                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1847                             }
1848                         };
1849                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1850                         offset
1851                     })
1852                     .collect();
1853
1854                 // Remove the unused slots and invert the mapping to obtain the
1855                 // combined `memory_index` (also see previous comment).
1856                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1857                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1858
1859                 variant.fields = FieldsShape::Arbitrary {
1860                     offsets: combined_offsets,
1861                     memory_index: combined_memory_index,
1862                 };
1863
1864                 size = size.max(variant.size);
1865                 align = align.max(variant.align);
1866                 Ok(tcx.intern_layout(variant))
1867             })
1868             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1869
1870         size = size.align_to(align.abi);
1871
1872         let abi =
1873             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1874                 Abi::Uninhabited
1875             } else {
1876                 Abi::Aggregate { sized: true }
1877             };
1878
1879         let layout = tcx.intern_layout(LayoutS {
1880             variants: Variants::Multiple {
1881                 tag,
1882                 tag_encoding: TagEncoding::Direct,
1883                 tag_field: tag_index,
1884                 variants,
1885             },
1886             fields: outer_fields,
1887             abi,
1888             largest_niche: prefix.largest_niche,
1889             size,
1890             align,
1891         });
1892         debug!("generator layout ({:?}): {:#?}", ty, layout);
1893         Ok(layout)
1894     }
1895
1896     /// This is invoked by the `layout_of` query to record the final
1897     /// layout of each type.
1898     #[inline(always)]
1899     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1900         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1901         // for dumping later.
1902         if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1903             self.record_layout_for_printing_outlined(layout)
1904         }
1905     }
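         // Note (illustrative; the exact format may differ between compiler
         // versions): the records collected here are printed at the end of the
         // session as lines such as
         // `print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes`.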
1906
1907     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1908         // Ignore layouts computed with non-empty parameter environments, and
1909         // ignore non-monomorphic layouts, as the user only wants to see what
1910         // results from the final codegen session.
1911         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1912             return;
1913         }
1914
1915         // (delay format until we actually need it)
1916         let record = |kind, packed, opt_discr_size, variants| {
1917             let type_desc = format!("{:?}", layout.ty);
1918             self.tcx.sess.code_stats.record_type_size(
1919                 kind,
1920                 type_desc,
1921                 layout.align.abi,
1922                 layout.size,
1923                 packed,
1924                 opt_discr_size,
1925                 variants,
1926             );
1927         };
1928
1929         let adt_def = match *layout.ty.kind() {
1930             ty::Adt(ref adt_def, _) => {
1931                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1932                 adt_def
1933             }
1934
1935             ty::Closure(..) => {
1936                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1937                 record(DataTypeKind::Closure, false, None, vec![]);
1938                 return;
1939             }
1940
1941             _ => {
1942                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1943                 return;
1944             }
1945         };
1946
1947         let adt_kind = adt_def.adt_kind();
1948         let adt_packed = adt_def.repr().pack.is_some();
1949
1950         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1951             let mut min_size = Size::ZERO;
1952             let field_info: Vec<_> = flds
1953                 .iter()
1954                 .enumerate()
1955                 .map(|(i, &name)| {
1956                     let field_layout = layout.field(self, i);
1957                     let offset = layout.fields.offset(i);
1958                     let field_end = offset + field_layout.size;
1959                     if min_size < field_end {
1960                         min_size = field_end;
1961                     }
1962                     FieldInfo {
1963                         name,
1964                         offset: offset.bytes(),
1965                         size: field_layout.size.bytes(),
1966                         align: field_layout.align.abi.bytes(),
1967                     }
1968                 })
1969                 .collect();
1970
1971             VariantInfo {
1972                 name: n,
1973                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1974                 align: layout.align.abi.bytes(),
1975                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1976                 fields: field_info,
1977             }
1978         };
1979
1980         match layout.variants {
1981             Variants::Single { index } => {
1982                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1983                     debug!(
1984                         "print-type-size `{:#?}` variant {}",
1985                         layout,
1986                         adt_def.variant(index).name
1987                     );
1988                     let variant_def = &adt_def.variant(index);
1989                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1990                     record(
1991                         adt_kind.into(),
1992                         adt_packed,
1993                         None,
1994                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1995                     );
1996                 } else {
1997                     // (This case arises for *empty* enums, so give it
1998                     // zero variants.)
1999                     record(adt_kind.into(), adt_packed, None, vec![]);
2000                 }
2001             }
2002
2003             Variants::Multiple { tag, ref tag_encoding, .. } => {
2004                 debug!(
2005                     "print-type-size `{:#?}` adt general variants def {}",
2006                     layout.ty,
2007                     adt_def.variants().len()
2008                 );
2009                 let variant_infos: Vec<_> = adt_def
2010                     .variants()
2011                     .iter_enumerated()
2012                     .map(|(i, variant_def)| {
2013                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2014                         build_variant_info(
2015                             Some(variant_def.name),
2016                             &fields,
2017                             layout.for_variant(self, i),
2018                         )
2019                     })
2020                     .collect();
2021                 record(
2022                     adt_kind.into(),
2023                     adt_packed,
2024                     match tag_encoding {
2025                         TagEncoding::Direct => Some(tag.size(self)),
2026                         _ => None,
2027                     },
2028                     variant_infos,
2029                 );
2030             }
2031         }
2032     }
2033 }
2034
2035 /// Type size "skeleton", i.e., the only information determining a type's size.
2036 /// While this is conservative (aside from constant sizes, only pointers,
2037 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
2038 /// enough to statically check common use cases of transmute.
2039 #[derive(Copy, Clone, Debug)]
2040 pub enum SizeSkeleton<'tcx> {
2041     /// Any statically computable Layout.
2042     Known(Size),
2043
2044     /// A potentially-fat pointer.
2045     Pointer {
2046         /// If true, this pointer is never null.
2047         non_zero: bool,
2048         /// The type which determines the unsized metadata, if any,
2049         /// of this pointer. Either a type parameter or a projection
2050         /// depending on one, with regions erased.
2051         tail: Ty<'tcx>,
2052     },
2053 }
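     // Illustrative example (not from this file): when `T` is a type
     // parameter whose layout cannot be computed yet, `&T` is summarized as
     // `SizeSkeleton::Pointer { non_zero: true, tail: T }` and `*const T` as
     // `Pointer { non_zero: false, tail: T }`. `same_size` below compares
     // only the tails, so a transmute between the two can still be checked
     // statically.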
2054
2055 impl<'tcx> SizeSkeleton<'tcx> {
2056     pub fn compute(
2057         ty: Ty<'tcx>,
2058         tcx: TyCtxt<'tcx>,
2059         param_env: ty::ParamEnv<'tcx>,
2060     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2061         debug_assert!(!ty.has_infer_types_or_consts());
2062
2063         // First try computing a static layout.
2064         let err = match tcx.layout_of(param_env.and(ty)) {
2065             Ok(layout) => {
2066                 return Ok(SizeSkeleton::Known(layout.size));
2067             }
2068             Err(err) => err,
2069         };
2070
2071         match *ty.kind() {
2072             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2073                 let non_zero = !ty.is_unsafe_ptr();
2074                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2075                 match tail.kind() {
2076                     ty::Param(_) | ty::Projection(_) => {
2077                         debug_assert!(tail.has_param_types_or_consts());
2078                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2079                     }
2080                     _ => bug!(
2081                         "SizeSkeleton::compute({}): layout errored ({}), yet \
2082                               tail `{}` is not a type parameter or a projection",
2083                         ty,
2084                         err,
2085                         tail
2086                     ),
2087                 }
2088             }
2089
2090             ty::Adt(def, substs) => {
2091                 // Only newtypes and enums with the nullable-pointer optimization.
2092                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2093                     return Err(err);
2094                 }
2095
2096                 // Get a zero-sized variant or a pointer newtype.
2097                 let zero_or_ptr_variant = |i| {
2098                     let i = VariantIdx::new(i);
2099                     let fields =
2100                         def.variant(i).fields.iter().map(|field| {
2101                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2102                         });
2103                     let mut ptr = None;
2104                     for field in fields {
2105                         let field = field?;
2106                         match field {
2107                             SizeSkeleton::Known(size) => {
2108                                 if size.bytes() > 0 {
2109                                     return Err(err);
2110                                 }
2111                             }
2112                             SizeSkeleton::Pointer { .. } => {
2113                                 if ptr.is_some() {
2114                                     return Err(err);
2115                                 }
2116                                 ptr = Some(field);
2117                             }
2118                         }
2119                     }
2120                     Ok(ptr)
2121                 };
2122
2123                 let v0 = zero_or_ptr_variant(0)?;
2124                 // Newtype.
2125                 if def.variants().len() == 1 {
2126                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2127                         return Ok(SizeSkeleton::Pointer {
2128                             non_zero: non_zero
2129                                 || match tcx.layout_scalar_valid_range(def.did()) {
2130                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2131                                     (Bound::Included(start), Bound::Included(end)) => {
2132                                         0 < start && start < end
2133                                     }
2134                                     _ => false,
2135                                 },
2136                             tail,
2137                         });
2138                     } else {
2139                         return Err(err);
2140                     }
2141                 }
2142
2143                 let v1 = zero_or_ptr_variant(1)?;
2144                 // Nullable pointer enum optimization.
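                     // Illustrative example (not from this file): `Option<&T>`
                     // for a type parameter `T` hits this arm: one variant is a
                     // non-zero pointer skeleton and the other is zero-sized, so
                     // the enum as a whole is summarized as a possibly-null
                     // pointer with the same tail.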
2145                 match (v0, v1) {
2146                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2147                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2148                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2149                     }
2150                     _ => Err(err),
2151                 }
2152             }
2153
2154             ty::Projection(_) | ty::Opaque(..) => {
2155                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2156                 if ty == normalized {
2157                     Err(err)
2158                 } else {
2159                     SizeSkeleton::compute(normalized, tcx, param_env)
2160                 }
2161             }
2162
2163             _ => Err(err),
2164         }
2165     }
2166
2167     pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2168         match (self, other) {
2169             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2170             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2171                 a == b
2172             }
2173             _ => false,
2174         }
2175     }
2176 }
2177
2178 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2179     fn tcx(&self) -> TyCtxt<'tcx>;
2180 }
2181
2182 pub trait HasParamEnv<'tcx> {
2183     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2184 }
2185
2186 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2187     #[inline]
2188     fn data_layout(&self) -> &TargetDataLayout {
2189         &self.data_layout
2190     }
2191 }
2192
2193 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2194     fn target_spec(&self) -> &Target {
2195         &self.sess.target
2196     }
2197 }
2198
2199 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2200     #[inline]
2201     fn tcx(&self) -> TyCtxt<'tcx> {
2202         *self
2203     }
2204 }
2205
2206 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2207     #[inline]
2208     fn data_layout(&self) -> &TargetDataLayout {
2209         &self.data_layout
2210     }
2211 }
2212
2213 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2214     fn target_spec(&self) -> &Target {
2215         &self.sess.target
2216     }
2217 }
2218
2219 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2220     #[inline]
2221     fn tcx(&self) -> TyCtxt<'tcx> {
2222         **self
2223     }
2224 }
2225
2226 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2227     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2228         self.param_env
2229     }
2230 }
2231
2232 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2233     fn data_layout(&self) -> &TargetDataLayout {
2234         self.tcx.data_layout()
2235     }
2236 }
2237
2238 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2239     fn target_spec(&self) -> &Target {
2240         self.tcx.target_spec()
2241     }
2242 }
2243
2244 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2245     fn tcx(&self) -> TyCtxt<'tcx> {
2246         self.tcx.tcx()
2247     }
2248 }
2249
2250 pub trait MaybeResult<T> {
2251     type Error;
2252
2253     fn from(x: Result<T, Self::Error>) -> Self;
2254     fn to_result(self) -> Result<T, Self::Error>;
2255 }
2256
2257 impl<T> MaybeResult<T> for T {
2258     type Error = !;
2259
2260     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2261         x
2262     }
2263     fn to_result(self) -> Result<T, Self::Error> {
2264         Ok(self)
2265     }
2266 }
2267
2268 impl<T, E> MaybeResult<T> for Result<T, E> {
2269     type Error = E;
2270
2271     fn from(x: Result<T, Self::Error>) -> Self {
2272         x
2273     }
2274     fn to_result(self) -> Result<T, Self::Error> {
2275         self
2276     }
2277 }
2278
2279 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2280
2281 /// Trait for contexts that want to be able to compute layouts of types.
2282 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2283 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2284     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2285     /// returned from `layout_of` (see also `handle_layout_err`).
2286     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2287
2288     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2289     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2290     #[inline]
2291     fn layout_tcx_at_span(&self) -> Span {
2292         DUMMY_SP
2293     }
2294
2295     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2296     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2297     ///
2298     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2299     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2300     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2301     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2302     fn handle_layout_err(
2303         &self,
2304         err: LayoutError<'tcx>,
2305         span: Span,
2306         ty: Ty<'tcx>,
2307     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2308 }
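     // Illustrative sketch (hypothetical `MyCodegenCx`, not from this file): an
     // infallible consumer can make `layout_of` return `TyAndLayout` directly by
     // turning layout errors into fatal errors:
     //
     //     impl<'tcx> LayoutOfHelpers<'tcx> for MyCodegenCx<'tcx> {
     //         type LayoutOfResult = TyAndLayout<'tcx>;
     //
     //         fn handle_layout_err(
     //             &self,
     //             err: LayoutError<'tcx>,
     //             span: Span,
     //             ty: Ty<'tcx>,
     //         ) -> ! {
     //             span_bug!(span, "failed to get layout for {}: {}", ty, err)
     //         }
     //     }
     //
     // With `type LayoutOfResult = TyAndLayout<'tcx>`, the `MaybeResult` impl
     // for plain `T` makes the associated error type `!`, so `handle_layout_err`
     // must diverge.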
2309
2310 /// Blanket extension trait for contexts that can compute layouts of types.
2311 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2312     /// Computes the layout of a type. Note that this implicitly
2313     /// executes in "reveal all" mode, and will normalize the input type.
2314     #[inline]
2315     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2316         self.spanned_layout_of(ty, DUMMY_SP)
2317     }
2318
2319     /// Computes the layout of a type, at `span`. Note that this implicitly
2320     /// executes in "reveal all" mode, and will normalize the input type.
2321     // FIXME(eddyb) avoid passing information like this, and instead add more
2322     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2323     #[inline]
2324     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2325         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2326         let tcx = self.tcx().at(span);
2327
2328         MaybeResult::from(
2329             tcx.layout_of(self.param_env().and(ty))
2330                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2331         )
2332     }
2333 }
2334
2335 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2336
2337 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2338     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2339
2340     #[inline]
2341     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2342         err
2343     }
2344 }
2345
2346 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2347     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2348
2349     #[inline]
2350     fn layout_tcx_at_span(&self) -> Span {
2351         self.tcx.span
2352     }
2353
2354     #[inline]
2355     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2356         err
2357     }
2358 }
2359
2360 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2361 where
2362     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2363 {
2364     fn ty_and_layout_for_variant(
2365         this: TyAndLayout<'tcx>,
2366         cx: &C,
2367         variant_index: VariantIdx,
2368     ) -> TyAndLayout<'tcx> {
2369         let layout = match this.variants {
2370             Variants::Single { index }
2371                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2372                 if index == variant_index &&
2373                 // Don't confuse variants of uninhabited enums with the enum itself.
2374                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2375                 this.fields != FieldsShape::Primitive =>
2376             {
2377                 this.layout
2378             }
2379
2380             Variants::Single { index } => {
2381                 let tcx = cx.tcx();
2382                 let param_env = cx.param_env();
2383
2384                 // Deny calling for_variant more than once for non-Single enums.
2385                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2386                     assert_eq!(original_layout.variants, Variants::Single { index });
2387                 }
2388
2389                 let fields = match this.ty.kind() {
2390                     ty::Adt(def, _) if def.variants().is_empty() =>
2391                         bug!("for_variant called on zero-variant enum"),
2392                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2393                     _ => bug!(),
2394                 };
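                     // The requested variant is not represented in the computed layout
                     // (it is uninhabited), so synthesize an uninhabited, zero-sized
                     // layout for it.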
2395                 tcx.intern_layout(LayoutS {
2396                     variants: Variants::Single { index: variant_index },
2397                     fields: match NonZeroUsize::new(fields) {
2398                         Some(fields) => FieldsShape::Union(fields),
2399                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2400                     },
2401                     abi: Abi::Uninhabited,
2402                     largest_niche: None,
2403                     align: tcx.data_layout.i8_align,
2404                     size: Size::ZERO,
2405                 })
2406             }
2407
2408             Variants::Multiple { ref variants, .. } => variants[variant_index],
2409         };
2410
2411         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2412
2413         TyAndLayout { ty: this.ty, layout }
2414     }
2415
2416     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2417         enum TyMaybeWithLayout<'tcx> {
2418             Ty(Ty<'tcx>),
2419             TyAndLayout(TyAndLayout<'tcx>),
2420         }
2421
2422         fn field_ty_or_layout<'tcx>(
2423             this: TyAndLayout<'tcx>,
2424             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2425             i: usize,
2426         ) -> TyMaybeWithLayout<'tcx> {
2427             let tcx = cx.tcx();
2428             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2429                 TyAndLayout {
2430                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2431                     ty: tag.primitive().to_ty(tcx),
2432                 }
2433             };
2434
2435             match *this.ty.kind() {
2436                 ty::Bool
2437                 | ty::Char
2438                 | ty::Int(_)
2439                 | ty::Uint(_)
2440                 | ty::Float(_)
2441                 | ty::FnPtr(_)
2442                 | ty::Never
2443                 | ty::FnDef(..)
2444                 | ty::GeneratorWitness(..)
2445                 | ty::Foreign(..)
2446                 | ty::Dynamic(_, _, ty::Dyn) => {
2447                     bug!("TyAndLayout::field({:?}): not applicable", this)
2448                 }
2449
2450                 // Potentially-fat pointers.
2451                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2452                     assert!(i < this.fields.count());
2453
2454                     // Reuse the fat `*T` type as its own thin pointer data field.
2455                     // This provides information about, e.g., DST struct pointees
2456                     // (which may have no non-DST form), and will work as long
2457                     // as the `Abi` or `FieldsShape` is checked by users.
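                         // For example: for `&[u8]`, field 0 is the data pointer (typed
                         // here as the fat `&[u8]` itself) and field 1 is the `usize`
                         // length, per the `struct_tail` match below.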
2458                     if i == 0 {
2459                         let nil = tcx.mk_unit();
2460                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2461                             tcx.mk_mut_ptr(nil)
2462                         } else {
2463                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2464                         };
2465
2466                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2467                         // the `Result` should always work because the type is
2468                         // always either `*mut ()` or `&'static mut ()`.
2469                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2470                             ty: this.ty,
2471                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2472                         });
2473                     }
2474
2475                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2476                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2477                         ty::Dynamic(_, _, ty::Dyn) => {
2478                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2479                                 tcx.lifetimes.re_static,
2480                                 tcx.mk_array(tcx.types.usize, 3),
2481                             ))
2482                             /* FIXME: use actual fn pointers
2483                             Warning: naively computing the number of entries in the
2484                             vtable by counting the methods on the trait + methods on
2485                             all parent traits does not work, because some methods may
2486                             not be object safe and are thus excluded from the vtable.
2487                             Increase this counter if you tried to implement this but
2488                             failed to do it without duplicating a lot of code from
2489                             other places in the compiler: 2
2490                             tcx.mk_tup(&[
2491                                 tcx.mk_array(tcx.types.usize, 3),
2492                                 tcx.mk_array(Option<fn()>),
2493                             ])
2494                             */
2495                         }
2496                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2497                     }
2498                 }
2499
2500                 // Arrays and slices.
2501                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2502                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2503
2504                 // Tuples, generators and closures.
2505                 ty::Closure(_, ref substs) => field_ty_or_layout(
2506                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2507                     cx,
2508                     i,
2509                 ),
2510
2511                 ty::Generator(def_id, ref substs, _) => match this.variants {
2512                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2513                         substs
2514                             .as_generator()
2515                             .state_tys(def_id, tcx)
2516                             .nth(index.as_usize())
2517                             .unwrap()
2518                             .nth(i)
2519                             .unwrap(),
2520                     ),
2521                     Variants::Multiple { tag, tag_field, .. } => {
2522                         if i == tag_field {
2523                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2524                         }
2525                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2526                     }
2527                 },
2528
2529                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2530
2531                 // ADTs.
2532                 ty::Adt(def, substs) => {
2533                     match this.variants {
2534                         Variants::Single { index } => {
2535                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2536                         }
2537
2538                         // Discriminant field for enums (where applicable).
2539                         Variants::Multiple { tag, .. } => {
2540                             assert_eq!(i, 0);
2541                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2542                         }
2543                     }
2544                 }
2545
2546                 ty::Dynamic(_, _, ty::DynStar) => {
2547                     if i == 0 {
2548                         TyMaybeWithLayout::Ty(tcx.types.usize)
2549                     } else if i == 1 {
2550                         // FIXME(dyn-star) same FIXME as above applies here too
2551                         TyMaybeWithLayout::Ty(
2552                             tcx.mk_imm_ref(
2553                                 tcx.lifetimes.re_static,
2554                                 tcx.mk_array(tcx.types.usize, 3),
2555                             ),
2556                         )
2557                     } else {
2558                         bug!("no field {i} on dyn*")
2559                     }
2560                 }
2561
2562                 ty::Projection(_)
2563                 | ty::Bound(..)
2564                 | ty::Placeholder(..)
2565                 | ty::Opaque(..)
2566                 | ty::Param(_)
2567                 | ty::Infer(_)
2568                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2569             }
2570         }
2571
2572         match field_ty_or_layout(this, cx, i) {
2573             TyMaybeWithLayout::Ty(field_ty) => {
2574                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2575                     bug!(
2576                         "failed to get layout for `{}`: {},\n\
2577                          despite it being a field (#{}) of an existing layout: {:#?}",
2578                         field_ty,
2579                         e,
2580                         i,
2581                         this
2582                     )
2583                 })
2584             }
2585             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2586         }
2587     }
2588
2589     fn ty_and_layout_pointee_info_at(
2590         this: TyAndLayout<'tcx>,
2591         cx: &C,
2592         offset: Size,
2593     ) -> Option<PointeeInfo> {
2594         let tcx = cx.tcx();
2595         let param_env = cx.param_env();
2596
2597         let addr_space_of_ty = |ty: Ty<'tcx>| {
2598             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2599         };
2600
2601         let pointee_info = match *this.ty.kind() {
2602             ty::RawPtr(mt) if offset.bytes() == 0 => {
2603                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2604                     size: layout.size,
2605                     align: layout.align.abi,
2606                     safe: None,
2607                     address_space: addr_space_of_ty(mt.ty),
2608                 })
2609             }
2610             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2611                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2612                     size: layout.size,
2613                     align: layout.align.abi,
2614                     safe: None,
2615                     address_space: cx.data_layout().instruction_address_space,
2616                 })
2617             }
2618             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2619                 let address_space = addr_space_of_ty(ty);
2620                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2621                     // Use conservative pointer kind if not optimizing. This saves us the
2622                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2623                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2624                     PointerKind::SharedMutable
2625                 } else {
2626                     match mt {
2627                         hir::Mutability::Not => {
2628                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2629                                 PointerKind::Frozen
2630                             } else {
2631                                 PointerKind::SharedMutable
2632                             }
2633                         }
2634                         hir::Mutability::Mut => {
2635                             // References to self-referential structures should not be considered
2636                             // noalias, as another pointer to the structure that is not based on
2637                             // the original reference can be obtained. We consider all !Unpin
2638                             // types to be potentially self-referential here.
2639                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2640                                 PointerKind::UniqueBorrowed
2641                             } else {
2642                                 PointerKind::UniqueBorrowedPinned
2643                             }
2644                         }
2645                     }
2646                 };
2647
2648                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2649                     size: layout.size,
2650                     align: layout.align.abi,
2651                     safe: Some(kind),
2652                     address_space,
2653                 })
2654             }
2655
2656             _ => {
2657                 let mut data_variant = match this.variants {
2658                     // Within the discriminant field, only the niche itself is
2659                     // always initialized, so we only check for a pointer at its
2660                     // offset.
2661                     //
2662                     // If the niche is a pointer, it's either valid (according
2663                     // to its type), or null (which the niche field's scalar
2664                     // validity range encodes).  This allows using
2665                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2666                     // this will continue to work as long as we don't start
2667                     // using more niches than just null (e.g., the first page of
2668                     // the address space, or unaligned pointers).
2669                     Variants::Multiple {
2670                         tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2671                         tag_field,
2672                         ..
2673                     } if this.fields.offset(tag_field) == offset => {
2674                         Some(this.for_variant(cx, untagged_variant))
2675                     }
2676                     _ => Some(this),
2677                 };
2678
2679                 if let Some(variant) = data_variant {
2680                     // We're not interested in any unions.
2681                     if let FieldsShape::Union(_) = variant.fields {
2682                         data_variant = None;
2683                     }
2684                 }
2685
2686                 let mut result = None;
2687
2688                 if let Some(variant) = data_variant {
2689                     let ptr_end = offset + Pointer.size(cx);
2690                     for i in 0..variant.fields.count() {
2691                         let field_start = variant.fields.offset(i);
2692                         if field_start <= offset {
2693                             let field = variant.field(cx, i);
2694                             result = field.to_result().ok().and_then(|field| {
2695                                 if ptr_end <= field_start + field.size {
2696                                     // We found the right field, look inside it.
2697                                     field.pointee_info_at(cx, offset - field_start)
2700                                 } else {
2701                                     None
2702                                 }
2703                             });
2704                             if result.is_some() {
2705                                 break;
2706                             }
2707                         }
2708                     }
2709                 }
2710
2711                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2712                 if let Some(ref mut pointee) = result {
2713                     if let ty::Adt(def, _) = this.ty.kind() {
2714                         if def.is_box() && offset.bytes() == 0 {
2715                             pointee.safe = Some(PointerKind::UniqueOwned);
2716                         }
2717                     }
2718                 }
2719
2720                 result
2721             }
2722         };
2723
2724         debug!(
2725             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2726             offset,
2727             this.ty.kind(),
2728             pointee_info
2729         );
2730
2731         pointee_info
2732     }
2733
2734     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2735         matches!(this.ty.kind(), ty::Adt(..))
2736     }
2737
2738     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2739         this.ty.kind() == &ty::Never
2740     }
2741
2742     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2743         matches!(this.ty.kind(), ty::Tuple(..))
2744     }
2745
2746     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2747         matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2748     }
2749 }
2750
2751 impl<'tcx> ty::Instance<'tcx> {
2752     // NOTE(eddyb) this is private to avoid using it from outside of
2753     // `fn_abi_of_instance` - any other uses are either too high-level
2754     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2755     // or should go through `FnAbi` instead, to avoid losing any
2756     // adjustments `fn_abi_of_instance` might be performing.
2757     #[tracing::instrument(level = "debug", skip(tcx, param_env))]
2758     fn fn_sig_for_fn_abi(
2759         &self,
2760         tcx: TyCtxt<'tcx>,
2761         param_env: ty::ParamEnv<'tcx>,
2762     ) -> ty::PolyFnSig<'tcx> {
2763         let ty = self.ty(tcx, param_env);
2764         match *ty.kind() {
2765             ty::FnDef(..) => {
2766                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2767                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2768                 // (i.e. due to being inside a projection that got normalized, see
2769                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2770                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2771                 let mut sig = match *ty.kind() {
2772                     ty::FnDef(def_id, substs) => tcx
2773                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
2774                         .subst(tcx, substs),
2775                     _ => unreachable!(),
2776                 };
2777
2778                 if let ty::InstanceDef::VTableShim(..) = self.def {
2779                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2780                     sig = sig.map_bound(|mut sig| {
2781                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2782                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2783                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2784                         sig
2785                     });
2786                 }
2787                 sig
2788             }
2789             ty::Closure(def_id, substs) => {
2790                 let sig = substs.as_closure().sig();
2791
2792                 let bound_vars = tcx.mk_bound_variable_kinds(
2793                     sig.bound_vars()
2794                         .iter()
2795                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2796                 );
2797                 let br = ty::BoundRegion {
2798                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2799                     kind: ty::BoundRegionKind::BrEnv,
2800                 };
2801                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2802                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
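                 // Depending on the closure kind, `env_ty` is `&C`, `&mut C`, or `C`
                 // by value, so the signature built below is effectively
                 // `fn(env, args...)` with the environment prepended.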
2803
2804                 let sig = sig.skip_binder();
2805                 ty::Binder::bind_with_vars(
2806                     tcx.mk_fn_sig(
2807                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2808                         sig.output(),
2809                         sig.c_variadic,
2810                         sig.unsafety,
2811                         sig.abi,
2812                     ),
2813                     bound_vars,
2814                 )
2815             }
2816             ty::Generator(_, substs, _) => {
2817                 let sig = substs.as_generator().poly_sig();
2818
2819                 let bound_vars = tcx.mk_bound_variable_kinds(
2820                     sig.bound_vars()
2821                         .iter()
2822                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2823                 );
2824                 let br = ty::BoundRegion {
2825                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2826                     kind: ty::BoundRegionKind::BrEnv,
2827                 };
2828                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2829                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2830
2831                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2832                 let pin_adt_ref = tcx.adt_def(pin_did);
2833                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2834                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2835
2836                 let sig = sig.skip_binder();
2837                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2838                 let state_adt_ref = tcx.adt_def(state_did);
2839                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2840                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
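                 // The signature built below is effectively
                 // `fn(Pin<&mut G>, Resume) -> GeneratorState<Yield, Return>`.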
2841                 ty::Binder::bind_with_vars(
2842                     tcx.mk_fn_sig(
2843                         [env_ty, sig.resume_ty].iter(),
2844                         &ret_ty,
2845                         false,
2846                         hir::Unsafety::Normal,
2847                         rustc_target::spec::abi::Abi::Rust,
2848                     ),
2849                     bound_vars,
2850                 )
2851             }
2852             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2853         }
2854     }
2855 }
2856
2857 /// Calculates whether a function's ABI can unwind or not.
2858 ///
2859 /// This takes two primary parameters:
2860 ///
2861 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2862 ///   codegen attrs for a defined function. For function pointers this set of
2863 ///   flags is the empty set. This is only applicable for Rust-defined
2864 ///   functions, and generally isn't needed except for small optimizations where
2865 ///   we try to say a function which otherwise might look like it could unwind
2866 ///   doesn't actually unwind (such as intrinsics).
2867 ///
2868 /// * `abi` - this is the ABI that the function is defined with. This is the
2869 ///   primary factor for determining whether a function can unwind or not.
2870 ///
2871 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2872 /// panics are implemented with unwinds on most platforms (when
2873 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2874 /// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
2875 /// specifically in the name (e.g. `"C-unwind"`). What unwinding means is
2876 /// defined for each ABI individually, but it always corresponds to some form
2877 /// of stack-based unwinding (the exact mechanism of which varies
2878 /// platform-by-platform).
2879 ///
2880 /// Rust functions are classified as able to unwind or not based on the
2881 /// active "panic strategy". In other words, Rust functions are considered to
2882 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2883 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2884 /// only if the final panic mode is panic=abort. In this scenario any code
2885 /// previously compiled assuming that a function can unwind is still correct, it
2886 /// just never happens to actually unwind at runtime.
2887 ///
2888 /// This function's answer to whether or not a function can unwind is quite
2889 /// impactful throughout the compiler. This affects things like:
2890 ///
2891 /// * Calling a function which can't unwind means codegen simply ignores any
2892 ///   associated unwinding cleanup.
2893 /// * Calling a function which can unwind from a function which can't unwind
2894 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2895 ///   aborts the process.
2896 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2897 ///   affects various optimizations and codegen.
2898 ///
2899 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2900 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2901 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2902 /// might (from a foreign exception or similar).
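     ///
     /// For example (illustrative): with `-Cpanic=unwind` and without
     /// `#![feature(c_unwind)]`, an `extern "C"` function is conservatively
     /// treated as able to unwind, while the same function built with
     /// `-Cpanic=abort` is not.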
2903 #[inline]
2904 #[tracing::instrument(level = "debug", skip(tcx))]
2905 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2906     if let Some(did) = fn_def_id {
2907         // Special attribute for functions which can't unwind.
2908         if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2909             return false;
2910         }
2911
2912         // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2913         //
2914     // Note that this is true regardless of the ABI specified on the function -- an `extern "C-unwind"`
2915         // function defined in Rust is also required to abort.
2916         if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2917             return false;
2918         }
2919
2920         // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2921         //
2922         // This is not part of `codegen_fn_attrs` as it can differ between crates
2923         // and therefore cannot be computed in core.
2924         if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2925             if Some(did) == tcx.lang_items().drop_in_place_fn() {
2926                 return false;
2927             }
2928         }
2929     }
2930
2931     // Otherwise, if this isn't special, then unwinding is generally determined by
2932     // the ABI of the function itself. ABIs like `C` have variants which also
2933     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2934     // ABIs have such an option. Otherwise the only other thing here is Rust
2935     // itself, and those ABIs are determined by the panic strategy configured
2936     // for this compilation.
2937     //
2938     // Unfortunately at this time there's also another caveat. Rust [RFC
2939     // 2945][rfc] has been accepted and is in the process of being implemented
2940     // and stabilized. In this interim state we need to deal with historical
2941     // rustc behavior as well as plan for future rustc behavior.
2942     //
2943     // Historically functions declared with `extern "C"` were marked at the
2944     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2945     // or not. This is UB for functions in `panic=unwind` mode that then
2946     // actually panic and unwind. Note that this behavior is true for both
2947     // externally declared functions as well as Rust-defined functions.
2948     //
2949     // To fix this UB rustc would like to change in the future to catch unwinds
2950     // from function calls that may unwind within a Rust-defined `extern "C"`
2951     // function and forcibly abort the process, thereby respecting the
2952     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2953     // ready to roll out, so determining whether or not the `C` family of ABIs
2954     // unwinds is conditional not only on their definition but also on whether the
2955     // `#![feature(c_unwind)]` feature gate is active.
2956     //
2957     // Note that this means that unlike historical compilers rustc now, by
2958     // default, unconditionally thinks that the `C` ABI may unwind. This will
2959     // prevent some optimization opportunities, however, so we try to scope this
2960     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2961     // to `panic=abort`).
2962     //
2963     // Eventually the check against `c_unwind` here will ideally get removed and
2964     // this'll be a little cleaner as it'll be a straightforward check of the
2965     // ABI.
2966     //
2967     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2968     use SpecAbi::*;
2969     match abi {
2970         C { unwind }
2971         | System { unwind }
2972         | Cdecl { unwind }
2973         | Stdcall { unwind }
2974         | Fastcall { unwind }
2975         | Vectorcall { unwind }
2976         | Thiscall { unwind }
2977         | Aapcs { unwind }
2978         | Win64 { unwind }
2979         | SysV64 { unwind } => {
2980             unwind
2981                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2982         }
2983         PtxKernel
2984         | Msp430Interrupt
2985         | X86Interrupt
2986         | AmdGpuKernel
2987         | EfiApi
2988         | AvrInterrupt
2989         | AvrNonBlockingInterrupt
2990         | CCmseNonSecureCall
2991         | Wasm
2992         | RustIntrinsic
2993         | PlatformIntrinsic
2994         | Unadjusted => false,
2995         Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2996     }
2997 }
2998
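     /// Maps a source-level ABI (`SpecAbi`) to the calling convention (`Conv`)
     /// used at codegen time, after applying any target-specific ABI adjustment.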
2999 #[inline]
3000 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
3001     use rustc_target::spec::abi::Abi::*;
3002     match tcx.sess.target.adjust_abi(abi) {
3003         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
3004         RustCold => Conv::RustCold,
3005
3006         // It's the ABI's job to select this, not ours.
3007         System { .. } => bug!("system abi should be selected elsewhere"),
3008         EfiApi => bug!("eficall abi should be selected elsewhere"),
3009
3010         Stdcall { .. } => Conv::X86Stdcall,
3011         Fastcall { .. } => Conv::X86Fastcall,
3012         Vectorcall { .. } => Conv::X86VectorCall,
3013         Thiscall { .. } => Conv::X86ThisCall,
3014         C { .. } => Conv::C,
3015         Unadjusted => Conv::C,
3016         Win64 { .. } => Conv::X86_64Win64,
3017         SysV64 { .. } => Conv::X86_64SysV,
3018         Aapcs { .. } => Conv::ArmAapcs,
3019         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
3020         PtxKernel => Conv::PtxKernel,
3021         Msp430Interrupt => Conv::Msp430Intr,
3022         X86Interrupt => Conv::X86Intr,
3023         AmdGpuKernel => Conv::AmdGpuKernel,
3024         AvrInterrupt => Conv::AvrInterrupt,
3025         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3026         Wasm => Conv::C,
3027
3028         // These API constants ought to be more specific...
3029         Cdecl { .. } => Conv::C,
3030     }
3031 }
3032
3033 /// Error produced by attempting to compute or adjust a `FnAbi`.
3034 #[derive(Copy, Clone, Debug, HashStable)]
3035 pub enum FnAbiError<'tcx> {
3036     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3037     Layout(LayoutError<'tcx>),
3038
3039     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3040     AdjustForForeignAbi(call::AdjustForForeignAbiError),
3041 }
3042
3043 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3044     fn from(err: LayoutError<'tcx>) -> Self {
3045         Self::Layout(err)
3046     }
3047 }
3048
3049 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3050     fn from(err: call::AdjustForForeignAbiError) -> Self {
3051         Self::AdjustForForeignAbi(err)
3052     }
3053 }
3054
3055 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3056     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3057         match self {
3058             Self::Layout(err) => err.fmt(f),
3059             Self::AdjustForForeignAbi(err) => err.fmt(f),
3060         }
3061     }
3062 }
3063
3064 // FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
3065 // just for error handling.
3066 #[derive(Debug)]
3067 pub enum FnAbiRequest<'tcx> {
3068     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3069     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3070 }
3071
3072 /// Trait for contexts that want to be able to compute `FnAbi`s.
3073 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3074 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3075     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3076     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3077     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3078
3079     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3080     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3081     ///
3082     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3083     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3084     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3085     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3086     fn handle_fn_abi_err(
3087         &self,
3088         err: FnAbiError<'tcx>,
3089         span: Span,
3090         fn_abi_request: FnAbiRequest<'tcx>,
3091     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3092 }
3093
3094 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3095 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3096     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3097     ///
3098     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3099     /// instead, where the instance is an `InstanceDef::Virtual`.
3100     #[inline]
3101     fn fn_abi_of_fn_ptr(
3102         &self,
3103         sig: ty::PolyFnSig<'tcx>,
3104         extra_args: &'tcx ty::List<Ty<'tcx>>,
3105     ) -> Self::FnAbiOfResult {
3106         // FIXME(eddyb) get a better `span` here.
3107         let span = self.layout_tcx_at_span();
3108         let tcx = self.tcx().at(span);
3109
3110         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3111             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3112         ))
3113     }
3114
3115     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3116     /// direct calls to an `fn`.
3117     ///
3118     /// NB: that includes virtual calls, which are represented by "direct calls"
3119     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3120     #[inline]
3121     #[tracing::instrument(level = "debug", skip(self))]
3122     fn fn_abi_of_instance(
3123         &self,
3124         instance: ty::Instance<'tcx>,
3125         extra_args: &'tcx ty::List<Ty<'tcx>>,
3126     ) -> Self::FnAbiOfResult {
3127         // FIXME(eddyb) get a better `span` here.
3128         let span = self.layout_tcx_at_span();
3129         let tcx = self.tcx().at(span);
3130
3131         MaybeResult::from(
3132             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3133                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3134                 // we can get some kind of span even if one wasn't provided.
3135                 // However, we don't do this early in order to avoid calling
3136                 // `def_span` unconditionally (which may have a perf penalty).
3137                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3138                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3139             }),
3140         )
3141     }
3142 }
3143
3144 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
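     // Illustrative usage (a sketch, not verbatim compiler code): a codegen
     // context typically calls
     //
     //     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
     //
     // and lets its `handle_fn_abi_err` turn any failure into a fatal error, so
     // `fn_abi` is a plain `&FnAbi` rather than a `Result`.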
3145
3146 fn fn_abi_of_fn_ptr<'tcx>(
3147     tcx: TyCtxt<'tcx>,
3148     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3149 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3150     let (param_env, (sig, extra_args)) = query.into_parts();
3151
3152     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3153 }
3154
3155 fn fn_abi_of_instance<'tcx>(
3156     tcx: TyCtxt<'tcx>,
3157     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3158 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3159     let (param_env, (instance, extra_args)) = query.into_parts();
3160
3161     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3162
3163     let caller_location = if instance.def.requires_caller_location(tcx) {
3164         Some(tcx.caller_location_ty())
3165     } else {
3166         None
3167     };
3168
3169     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3170         sig,
3171         extra_args,
3172         caller_location,
3173         Some(instance.def_id()),
3174         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3175     )
3176 }
3177
3178 // Handle safe Rust thin and fat pointers.
3179 pub fn adjust_for_rust_scalar<'tcx>(
3180     cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3181     attrs: &mut ArgAttributes,
3182     scalar: Scalar,
3183     layout: TyAndLayout<'tcx>,
3184     offset: Size,
3185     is_return: bool,
3186 ) {
3187     // Booleans are always a noundef i1 that needs to be zero-extended.
3188     if scalar.is_bool() {
3189         attrs.ext(ArgExtension::Zext);
3190         attrs.set(ArgAttribute::NoUndef);
3191         return;
3192     }
3193
3194     // Scalars which have invalid values cannot be undef.
3195     if !scalar.is_always_valid(&cx) {
3196         attrs.set(ArgAttribute::NoUndef);
3197     }
3198
3199     // Only pointer types are handled below.
3200     let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3201
3202     if !valid_range.contains(0) {
3203         attrs.set(ArgAttribute::NonNull);
3204     }
3205
3206     if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3207         if let Some(kind) = pointee.safe {
3208             attrs.pointee_align = Some(pointee.align);
3209
3210             // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
3211             // for the entire duration of the function as they can be deallocated
3212             // at any time. Same for shared mutable references. If LLVM had a
3213             // way to say "dereferenceable on entry" we could use it here.
3214             attrs.pointee_size = match kind {
3215                 PointerKind::UniqueBorrowed
3216                 | PointerKind::UniqueBorrowedPinned
3217                 | PointerKind::Frozen => pointee.size,
3218                 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3219             };
3220
3221             // `Box`, `&T`, and `&mut T` cannot be undef.
3222             // Note that this only applies to the value of the pointer itself;
3223             // this attribute doesn't make it UB for the pointed-to data to be undef.
3224             attrs.set(ArgAttribute::NoUndef);
3225
3226             // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3227             // `noalias` for it. This can be turned off using an unstable flag.
3228             // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3229             let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3230
3231             // `&mut` pointer parameters never alias other parameters,
3232             // or mutable global data
3233             //
3234             // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3235             // and can be marked as both `readonly` and `noalias`, as
3236             // LLVM's definition of `noalias` is based solely on memory
3237             // dependencies rather than pointer equality
3238             //
3239             // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3240             // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3241             // or not to actually emit the attribute. It can also be controlled with the
3242             // `-Zmutable-noalias` debugging option.
3243             let no_alias = match kind {
3244                 PointerKind::SharedMutable
3245                 | PointerKind::UniqueBorrowed
3246                 | PointerKind::UniqueBorrowedPinned => false,
3247                 PointerKind::UniqueOwned => noalias_for_box,
3248                 PointerKind::Frozen => !is_return,
3249             };
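             // Illustrative net effect (a sketch, in an optimized build): a
             // non-return `&u32` argument (`Frozen`) ends up `NonNull + NoUndef +
             // NoAlias + ReadOnly` with a 4-byte dereferenceable pointee, while
             // `&mut T` is instead handled via the separate `NoAliasMutRef`
             // attribute set below.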
3250             if no_alias {
3251                 attrs.set(ArgAttribute::NoAlias);
3252             }
3253
3254             if kind == PointerKind::Frozen && !is_return {
3255                 attrs.set(ArgAttribute::ReadOnly);
3256             }
3257
3258             if kind == PointerKind::UniqueBorrowed && !is_return {
3259                 attrs.set(ArgAttribute::NoAliasMutRef);
3260             }
3261         }
3262     }
3263 }
3264
3265 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3266     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3267     // arguments of this method, into a separate `struct`.
3268     #[tracing::instrument(
3269         level = "debug",
3270         skip(self, caller_location, fn_def_id, force_thin_self_ptr)
3271     )]
3272     fn fn_abi_new_uncached(
3273         &self,
3274         sig: ty::PolyFnSig<'tcx>,
3275         extra_args: &[Ty<'tcx>],
3276         caller_location: Option<Ty<'tcx>>,
3277         fn_def_id: Option<DefId>,
3278         // FIXME(eddyb) replace this with something typed, like an `enum`.
3279         force_thin_self_ptr: bool,
3280     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3281         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3282
3283         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3284
3285         let mut inputs = sig.inputs();
3286         let extra_args = if sig.abi == RustCall {
3287             assert!(!sig.c_variadic && extra_args.is_empty());
3288
3289             if let Some(input) = sig.inputs().last() {
3290                 if let ty::Tuple(tupled_arguments) = input.kind() {
3291                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3292                     tupled_arguments
3293                 } else {
3294                     bug!(
3295                         "argument to function with \"rust-call\" ABI \
3296                             is not a tuple"
3297                     );
3298                 }
3299             } else {
3300                 bug!(
3301                     "argument to function with \"rust-call\" ABI \
3302                         is not a tuple"
3303                 );
3304             }
3305         } else {
3306             assert!(sig.c_variadic || extra_args.is_empty());
3307             extra_args
3308         };
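         // Illustrative example: for an `extern "rust-call" fn(A, (B, C))`, the
         // tupled tail is flattened so the ABI sees the argument list `[A, B, C]`.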
3309
3310         let target = &self.tcx.sess.target;
3311         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3312         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3313         let linux_s390x_gnu_like =
3314             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3315         let linux_sparc64_gnu_like =
3316             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3317         let linux_powerpc_gnu_like =
3318             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3319         use SpecAbi::*;
3320         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3321
3322         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3323             let span = tracing::debug_span!("arg_of");
3324             let _entered = span.enter();
3325             let is_return = arg_idx.is_none();
3326
3327             let layout = self.layout_of(ty)?;
3328             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3329                 // Don't pass the vtable; it's not an argument of the virtual fn.
3330                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3331                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3332                 make_thin_self_ptr(self, layout)
3333             } else {
3334                 layout
3335             };
3336
3337             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3338                 let mut attrs = ArgAttributes::new();
3339                 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
3340                 attrs
3341             });
3342
3343             if arg.layout.is_zst() {
3344                 // For some forsaken reason, x86_64-pc-windows-gnu
3345                 // doesn't ignore zero-sized struct arguments.
3346                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3347                 if is_return
3348                     || rust_abi
3349                     || (!win_x64_gnu
3350                         && !linux_s390x_gnu_like
3351                         && !linux_sparc64_gnu_like
3352                         && !linux_powerpc_gnu_like)
3353                 {
3354                     arg.mode = PassMode::Ignore;
3355                 }
3356             }
3357
3358             Ok(arg)
3359         };
3360
3361         let mut fn_abi = FnAbi {
3362             ret: arg_of(sig.output(), None)?,
3363             args: inputs
3364                 .iter()
3365                 .copied()
3366                 .chain(extra_args.iter().copied())
3367                 .chain(caller_location)
3368                 .enumerate()
3369                 .map(|(i, ty)| arg_of(ty, Some(i)))
3370                 .collect::<Result<_, _>>()?,
3371             c_variadic: sig.c_variadic,
3372             fixed_count: inputs.len() as u32,
3373             conv,
3374             can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3375         };
3376         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3377         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3378         Ok(self.tcx.arena.alloc(fn_abi))
3379     }
3380
3381     #[tracing::instrument(level = "trace", skip(self))]
3382     fn fn_abi_adjust_for_abi(
3383         &self,
3384         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3385         abi: SpecAbi,
3386     ) -> Result<(), FnAbiError<'tcx>> {
3387         if abi == SpecAbi::Unadjusted {
3388             return Ok(());
3389         }
3390
3391         if abi == SpecAbi::Rust
3392             || abi == SpecAbi::RustCall
3393             || abi == SpecAbi::RustIntrinsic
3394             || abi == SpecAbi::PlatformIntrinsic
3395         {
3396             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3397                 if arg.is_ignore() {
3398                     return;
3399                 }
3400
3401                 match arg.layout.abi {
3402                     Abi::Aggregate { .. } => {}
3403
3404                     // This is a fun case! The gist of what this is doing is
3405                     // that we want callers and callees to always agree on the
3406                     // ABI of how they pass SIMD arguments. If we were to *not*
3407                     // make these arguments indirect then they'd be immediates
3408                     // in LLVM, which means that they'd use whatever the
3409                     // appropriate ABI is for the callee and the caller. That
3410                     // means, for example, if the caller doesn't have AVX
3411                     // enabled but the callee does, then passing an AVX argument
3412                     // across this boundary would cause corrupt data to show up.
3413                     //
3414                     // This problem is fixed by unconditionally passing SIMD
3415                     // arguments through memory between callers and callees
3416                     // which should get them all to agree on ABI regardless of
3417                     // target feature sets. Some more information about this
3418                     // issue can be found in #44367.
3419                     //
3420                     // Note that the platform intrinsic ABI is exempt here as
3421                     // that's how we connect up to LLVM and it's unstable
3422                     // anyway, we control all calls to it in libstd.
3423                     Abi::Vector { .. }
3424                         if abi != SpecAbi::PlatformIntrinsic
3425                             && self.tcx.sess.target.simd_types_indirect =>
3426                     {
3427                         arg.make_indirect();
3428                         return;
3429                     }
3430
3431                     _ => return,
3432                 }
3433
3434                 let size = arg.layout.size;
3435                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3436                     arg.make_indirect();
3437                 } else {
3438                     // We want to pass small aggregates as immediates, but using
3439                     // an LLVM aggregate type for this leads to bad optimizations,
3440                     // so we pick an appropriately sized integer type instead.
3441                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3442                 }
3443             };
3444             fixup(&mut fn_abi.ret);
3445             for arg in fn_abi.args.iter_mut() {
3446                 fixup(arg);
3447             }
3448         } else {
3449             fn_abi.adjust_for_foreign_abi(self, abi)?;
3450         }
3451
3452         Ok(())
3453     }
3454 }
3455
3456 #[tracing::instrument(level = "debug", skip(cx))]
3457 fn make_thin_self_ptr<'tcx>(
3458     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3459     layout: TyAndLayout<'tcx>,
3460 ) -> TyAndLayout<'tcx> {
3461     let tcx = cx.tcx();
3462     let fat_pointer_ty = if layout.is_unsized() {
3463         // unsized `self` is passed as a pointer to `self`
3464         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3465         tcx.mk_mut_ptr(layout.ty)
3466     } else {
3467         match layout.abi {
3468             Abi::ScalarPair(..) | Abi::Scalar(..) => (),
3469             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3470         }
3471
3472         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3473         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3474         // elsewhere in the compiler as a method on a `dyn Trait`.
3475         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3476         // get a built-in pointer type
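         // Illustrative descent (assuming the usual std layering): `Rc<Self>` ->
         // `NonNull<RcBox<Self>>` -> `*const RcBox<Self>`, at which point the
         // loop below stops.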
3477         let mut fat_pointer_layout = layout;
3478         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3479             && !fat_pointer_layout.ty.is_region_ptr()
3480         {
3481             for i in 0..fat_pointer_layout.fields.count() {
3482                 let field_layout = fat_pointer_layout.field(cx, i);
3483
3484                 if !field_layout.is_zst() {
3485                     fat_pointer_layout = field_layout;
3486                     continue 'descend_newtypes;
3487                 }
3488             }
3489
3490             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3491         }
3492
3493         fat_pointer_layout.ty
3494     };
3495
3496     // we now have a type like `*mut RcBox<dyn Trait>`
3497     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3498     // this is understood as a special case elsewhere in the compiler
3499     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3500
3501     TyAndLayout {
3502         ty: fat_pointer_ty,
3503
3504         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3505         // should always work because the type is always `*mut ()`.
3506         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3507     }
3508 }