]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_middle/src/ty/layout.rs
Rollup merge of #101677 - winxpqq955:issue-101211, r=fee1-dead
[rust.git] / compiler / rustc_middle / src / ty / layout.rs
1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{
6     self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
7     TyCtxt, TypeVisitable,
8 };
9 use rustc_ast as ast;
10 use rustc_attr as attr;
11 use rustc_hir as hir;
12 use rustc_hir::def_id::DefId;
13 use rustc_hir::lang_items::LangItem;
14 use rustc_index::bit_set::BitSet;
15 use rustc_index::vec::{Idx, IndexVec};
16 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
17 use rustc_span::symbol::Symbol;
18 use rustc_span::{Span, DUMMY_SP};
19 use rustc_target::abi::call::{
20     ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
21 };
22 use rustc_target::abi::*;
23 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
24
25 use std::cmp::{self, Ordering};
26 use std::fmt;
27 use std::iter;
28 use std::num::NonZeroUsize;
29 use std::ops::Bound;
30
31 use rand::{seq::SliceRandom, SeedableRng};
32 use rand_xoshiro::Xoshiro128StarStar;
33
34 pub fn provide(providers: &mut ty::query::Providers) {
35     *providers =
36         ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
37 }
38
/// Extension methods for `rustc_target::abi::Integer`, connecting the
/// target-level integer sizes to `rustc_middle` types and attributes.
pub trait IntegerExt {
    /// Converts this integer size to the Rust integer `Ty` of the given signedness.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    /// Converts an `attr::IntType` (from a `#[repr(...)]` attribute) to an `Integer`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Converts a signed integer type (`ty::IntTy`) to an `Integer`.
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    /// Converts an unsigned integer type (`ty::UintTy`) to an `Integer`.
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    /// Picks the discriminant size and signedness for an enum whose
    /// discriminants span `min..=max`, honoring any `#[repr]` hint.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}
52
53 impl IntegerExt for Integer {
54     #[inline]
55     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
56         match (*self, signed) {
57             (I8, false) => tcx.types.u8,
58             (I16, false) => tcx.types.u16,
59             (I32, false) => tcx.types.u32,
60             (I64, false) => tcx.types.u64,
61             (I128, false) => tcx.types.u128,
62             (I8, true) => tcx.types.i8,
63             (I16, true) => tcx.types.i16,
64             (I32, true) => tcx.types.i32,
65             (I64, true) => tcx.types.i64,
66             (I128, true) => tcx.types.i128,
67         }
68     }
69
70     /// Gets the Integer type from an attr::IntType.
71     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
72         let dl = cx.data_layout();
73
74         match ity {
75             attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
76             attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
77             attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
78             attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
79             attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
80             attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
81                 dl.ptr_sized_integer()
82             }
83         }
84     }
85
86     fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
87         match ity {
88             ty::IntTy::I8 => I8,
89             ty::IntTy::I16 => I16,
90             ty::IntTy::I32 => I32,
91             ty::IntTy::I64 => I64,
92             ty::IntTy::I128 => I128,
93             ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
94         }
95     }
96     fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
97         match ity {
98             ty::UintTy::U8 => I8,
99             ty::UintTy::U16 => I16,
100             ty::UintTy::U32 => I32,
101             ty::UintTy::U64 => I64,
102             ty::UintTy::U128 => I128,
103             ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
104         }
105     }
106
107     /// Finds the appropriate Integer type and signedness for the given
108     /// signed discriminant range and `#[repr]` attribute.
109     /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
110     /// that shouldn't affect anything, other than maybe debuginfo.
111     fn repr_discr<'tcx>(
112         tcx: TyCtxt<'tcx>,
113         ty: Ty<'tcx>,
114         repr: &ReprOptions,
115         min: i128,
116         max: i128,
117     ) -> (Integer, bool) {
118         // Theoretically, negative values could be larger in unsigned representation
119         // than the unsigned representation of the signed minimum. However, if there
120         // are any negative values, the only valid unsigned representation is u128
121         // which can fit all i128 values, so the result remains unaffected.
122         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
123         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
124
125         if let Some(ity) = repr.int {
126             let discr = Integer::from_attr(&tcx, ity);
127             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
128             if discr < fit {
129                 bug!(
130                     "Integer::repr_discr: `#[repr]` hint too small for \
131                       discriminant range of enum `{}",
132                     ty
133                 )
134             }
135             return (discr, ity.is_signed());
136         }
137
138         let at_least = if repr.c() {
139             // This is usually I32, however it can be different on some platforms,
140             // notably hexagon and arm-none/thumb-none
141             tcx.data_layout().c_enum_min_size
142         } else {
143             // repr(Rust) enums try to be as small as possible
144             I8
145         };
146
147         // If there are no negative values, we can use the unsigned fit.
148         if min >= 0 {
149             (cmp::max(unsigned_fit, at_least), false)
150         } else {
151             (cmp::max(signed_fit, at_least), true)
152         }
153     }
154 }
155
/// Extension methods for `rustc_target::abi::Primitive`, converting
/// ABI primitives back to `rustc_middle` types.
pub trait PrimitiveExt {
    /// Returns the Rust type this primitive represents.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// Returns an *integer* type for this primitive; panics on floats.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}
160
161 impl PrimitiveExt for Primitive {
162     #[inline]
163     fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
164         match *self {
165             Int(i, signed) => i.to_ty(tcx, signed),
166             F32 => tcx.types.f32,
167             F64 => tcx.types.f64,
168             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
169         }
170     }
171
172     /// Return an *integer* type matching this primitive.
173     /// Useful in particular when dealing with enum discriminants.
174     #[inline]
175     fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
176         match *self {
177             Int(i, signed) => i.to_ty(tcx, signed),
178             Pointer => tcx.types.usize,
179             F32 | F64 => bug!("floats do not have an int type"),
180         }
181     }
182 }
183
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
///   A 4-bit log2 field tops out at 15, hence `1 << 0xF` (= 32768) lanes.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
202
/// The ways in which computing a type's layout can fail.
/// (User-facing wording for each variant lives in the `Display` impl.)
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of this type could not be determined.
    Unknown(Ty<'tcx>),
    /// The type's computed size overflowed (too big for the target architecture).
    SizeOverflow(Ty<'tcx>),
    /// The type (or a type it mentions) could not be normalized, so no
    /// layout can be computed; carries the normalization failure details.
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}
209
210 impl<'tcx> fmt::Display for LayoutError<'tcx> {
211     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
212         match *self {
213             LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
214             LayoutError::SizeOverflow(ty) => {
215                 write!(f, "values of the type `{}` are too big for the current architecture", ty)
216             }
217             LayoutError::NormalizationFailure(t, e) => write!(
218                 f,
219                 "unable to determine layout for `{}` because `{}` cannot be normalized",
220                 t,
221                 e.get_type_for_failure()
222             ),
223         }
224     }
225 }
226
/// Provider for the `layout_of` query: computes the layout of `ty` under
/// the given param-env, normalizing first and caching under the
/// normalized type.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    // Layout is computed with all opaque types etc. revealed.
    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        // (Re-entering the query with the normalized type makes the query
        // cache hold one entry per distinct normalized type.)
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = cx.layout_of_uncached(ty)?;
    let layout = TyAndLayout { ty, layout };

    // Record layout statistics when `-Zprint-type-sizes`-style reporting asks for it.
    cx.record_layout_for_printing(layout);

    // Debug-time invariant checks on the freshly computed layout.
    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
265
/// Everything needed to compute layouts: a (type) context `C` — typically
/// `TyCtxt<'tcx>` — plus the parameter environment types are interpreted in.
#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
271
/// How a univariant layout is being assembled; this determines whether the
/// last field may be unsized and how field reordering may proceed.
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
281
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; map.len()];
    for (src, &dst) in map.iter().enumerate() {
        inverse[dst as usize] = src as u32;
    }
    inverse
}
294
295 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Builds the layout for a two-field aggregate with `Abi::ScalarPair(a, b)`:
    /// `a` sits at offset 0 and `b` at the first offset past `a` that satisfies
    /// `b`'s ABI alignment; the total size is padded up to the pair's alignment.
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        // The pair is at least as aligned as the target's minimum aggregate alignment.
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
322
    /// Computes the (uninterned) layout of a single-variant aggregate from
    /// its field layouts, `#[repr]` options, and `StructKind`.
    ///
    /// Responsibilities, in order: possibly reorder fields (optimization or
    /// `-Z randomize-layout`), assign offsets honoring packing/alignment,
    /// track the largest niche, and finally try to upgrade the ABI from
    /// `Aggregate` to `Scalar`/`ScalarPair`/`Vector` where the fields allow.
    ///
    /// Errors with `LayoutError::Unknown` on contradictory repr options and
    /// `LayoutError::SizeOverflow` if the total size overflows.
    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        // `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive;
        // this should have been rejected earlier, hence delay_span_bug.
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        // Packed structs start from byte alignment; otherwise from the
        // target's minimum aggregate alignment.
        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        // Identity permutation to start with; may be reordered below.
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            // A potentially-unsized last field must stay last; exclude it
            // from the reorderable range.
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            // Effective alignment of a field, capped by `#[repr(packed(N))]`.
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        // For enum variants, reserve room for the tag (or other prefix)
        // before laying out the fields.
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        // Walk fields in memory order, assigning each its aligned offset.
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                // An unsized field was not last — a bug upstream of layout.
                // NOTE(review): this message prints `offsets.len()` (the total
                // field count) rather than the offending field index `i` —
                // looks like a diagnostic slip; verify against upstream.
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            // Keep whichever field niche offers the most spare values,
            // translating its offset into the aggregate's frame.
            if let Some(mut niche) = field.largest_niche {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        // `#[repr(align(N))]` can only raise the alignment.
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        // Any uninhabited field makes the whole aggregate uninhabited.
        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
542
543     fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
544         let tcx = self.tcx;
545         let param_env = self.param_env;
546         let dl = self.data_layout();
547         let scalar_unit = |value: Primitive| {
548             let size = value.size(dl);
549             assert!(size.bits() <= 128);
550             Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
551         };
552         let scalar =
553             |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
554
555         let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
556             Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
557         };
558         debug_assert!(!ty.has_infer_types_or_consts());
559
560         Ok(match *ty.kind() {
561             // Basic scalars.
562             ty::Bool => tcx.intern_layout(LayoutS::scalar(
563                 self,
564                 Scalar::Initialized {
565                     value: Int(I8, false),
566                     valid_range: WrappingRange { start: 0, end: 1 },
567                 },
568             )),
569             ty::Char => tcx.intern_layout(LayoutS::scalar(
570                 self,
571                 Scalar::Initialized {
572                     value: Int(I32, false),
573                     valid_range: WrappingRange { start: 0, end: 0x10FFFF },
574                 },
575             )),
576             ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
577             ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
578             ty::Float(fty) => scalar(match fty {
579                 ty::FloatTy::F32 => F32,
580                 ty::FloatTy::F64 => F64,
581             }),
582             ty::FnPtr(_) => {
583                 let mut ptr = scalar_unit(Pointer);
584                 ptr.valid_range_mut().start = 1;
585                 tcx.intern_layout(LayoutS::scalar(self, ptr))
586             }
587
588             // The never type.
589             ty::Never => tcx.intern_layout(LayoutS {
590                 variants: Variants::Single { index: VariantIdx::new(0) },
591                 fields: FieldsShape::Primitive,
592                 abi: Abi::Uninhabited,
593                 largest_niche: None,
594                 align: dl.i8_align,
595                 size: Size::ZERO,
596             }),
597
598             // Potentially-wide pointers.
599             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
600                 let mut data_ptr = scalar_unit(Pointer);
601                 if !ty.is_unsafe_ptr() {
602                     data_ptr.valid_range_mut().start = 1;
603                 }
604
605                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
606                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
607                     return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
608                 }
609
610                 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
611                 let metadata = match unsized_part.kind() {
612                     ty::Foreign(..) => {
613                         return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
614                     }
615                     ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
616                     ty::Dynamic(..) => {
617                         let mut vtable = scalar_unit(Pointer);
618                         vtable.valid_range_mut().start = 1;
619                         vtable
620                     }
621                     _ => return Err(LayoutError::Unknown(unsized_part)),
622                 };
623
624                 // Effectively a (ptr, meta) tuple.
625                 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
626             }
627
628             // Arrays and slices.
629             ty::Array(element, mut count) => {
630                 if count.has_projections() {
631                     count = tcx.normalize_erasing_regions(param_env, count);
632                     if count.has_projections() {
633                         return Err(LayoutError::Unknown(ty));
634                     }
635                 }
636
637                 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
638                 let element = self.layout_of(element)?;
639                 let size =
640                     element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
641
642                 let abi =
643                     if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
644                         Abi::Uninhabited
645                     } else {
646                         Abi::Aggregate { sized: true }
647                     };
648
649                 let largest_niche = if count != 0 { element.largest_niche } else { None };
650
651                 tcx.intern_layout(LayoutS {
652                     variants: Variants::Single { index: VariantIdx::new(0) },
653                     fields: FieldsShape::Array { stride: element.size, count },
654                     abi,
655                     largest_niche,
656                     align: element.align,
657                     size,
658                 })
659             }
660             ty::Slice(element) => {
661                 let element = self.layout_of(element)?;
662                 tcx.intern_layout(LayoutS {
663                     variants: Variants::Single { index: VariantIdx::new(0) },
664                     fields: FieldsShape::Array { stride: element.size, count: 0 },
665                     abi: Abi::Aggregate { sized: false },
666                     largest_niche: None,
667                     align: element.align,
668                     size: Size::ZERO,
669                 })
670             }
671             ty::Str => tcx.intern_layout(LayoutS {
672                 variants: Variants::Single { index: VariantIdx::new(0) },
673                 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
674                 abi: Abi::Aggregate { sized: false },
675                 largest_niche: None,
676                 align: dl.i8_align,
677                 size: Size::ZERO,
678             }),
679
680             // Odd unit types.
681             ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
682             ty::Dynamic(..) | ty::Foreign(..) => {
683                 let mut unit = self.univariant_uninterned(
684                     ty,
685                     &[],
686                     &ReprOptions::default(),
687                     StructKind::AlwaysSized,
688                 )?;
689                 match unit.abi {
690                     Abi::Aggregate { ref mut sized } => *sized = false,
691                     _ => bug!(),
692                 }
693                 tcx.intern_layout(unit)
694             }
695
696             ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
697
698             ty::Closure(_, ref substs) => {
699                 let tys = substs.as_closure().upvar_tys();
700                 univariant(
701                     &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
702                     &ReprOptions::default(),
703                     StructKind::AlwaysSized,
704                 )?
705             }
706
707             ty::Tuple(tys) => {
708                 let kind =
709                     if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
710
711                 univariant(
712                     &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
713                     &ReprOptions::default(),
714                     kind,
715                 )?
716             }
717
718             // SIMD vector types.
719             ty::Adt(def, substs) if def.repr().simd() => {
720                 if !def.is_struct() {
721                     // Should have yielded E0517 by now.
722                     tcx.sess.delay_span_bug(
723                         DUMMY_SP,
724                         "#[repr(simd)] was applied to an ADT that is not a struct",
725                     );
726                     return Err(LayoutError::Unknown(ty));
727                 }
728
729                 // Supported SIMD vectors are homogeneous ADTs with at least one field:
730                 //
731                 // * #[repr(simd)] struct S(T, T, T, T);
732                 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
733                 // * #[repr(simd)] struct S([T; 4])
734                 //
735                 // where T is a primitive scalar (integer/float/pointer).
736
737                 // SIMD vectors with zero fields are not supported.
738                 // (should be caught by typeck)
739                 if def.non_enum_variant().fields.is_empty() {
740                     tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
741                 }
742
743                 // Type of the first ADT field:
744                 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
745
746                 // Heterogeneous SIMD vectors are not supported:
747                 // (should be caught by typeck)
748                 for fi in &def.non_enum_variant().fields {
749                     if fi.ty(tcx, substs) != f0_ty {
750                         tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
751                     }
752                 }
753
754                 // The element type and number of elements of the SIMD vector
755                 // are obtained from:
756                 //
757                 // * the element type and length of the single array field, if
758                 // the first field is of array type, or
759                 //
760                 // * the homogeneous field type and the number of fields.
761                 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
762                     // First ADT field is an array:
763
764                     // SIMD vectors with multiple array fields are not supported:
765                     // (should be caught by typeck)
766                     if def.non_enum_variant().fields.len() != 1 {
767                         tcx.sess.fatal(&format!(
768                             "monomorphising SIMD type `{}` with more than one array field",
769                             ty
770                         ));
771                     }
772
773                     // Extract the number of elements from the layout of the array field:
774                     let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
775                         return Err(LayoutError::Unknown(ty));
776                     };
777
778                     (*e_ty, *count, true)
779                 } else {
780                     // First ADT field is not an array:
781                     (f0_ty, def.non_enum_variant().fields.len() as _, false)
782                 };
783
784                 // SIMD vectors of zero length are not supported.
785                 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
786                 // support.
787                 //
788                 // Can't be caught in typeck if the array length is generic.
789                 if e_len == 0 {
790                     tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
791                 } else if e_len > MAX_SIMD_LANES {
792                     tcx.sess.fatal(&format!(
793                         "monomorphising SIMD type `{}` of length greater than {}",
794                         ty, MAX_SIMD_LANES,
795                     ));
796                 }
797
798                 // Compute the ABI of the element type:
799                 let e_ly = self.layout_of(e_ty)?;
800                 let Abi::Scalar(e_abi) = e_ly.abi else {
801                     // This error isn't caught in typeck, e.g., if
802                     // the element type of the vector is generic.
803                     tcx.sess.fatal(&format!(
804                         "monomorphising SIMD type `{}` with a non-primitive-scalar \
805                         (integer/float/pointer) element type `{}`",
806                         ty, e_ty
807                     ))
808                 };
809
810                 // Compute the size and alignment of the vector:
811                 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
812                 let align = dl.vector_align(size);
813                 let size = size.align_to(align.abi);
814
815                 // Compute the placement of the vector fields:
816                 let fields = if is_array {
817                     FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
818                 } else {
819                     FieldsShape::Array { stride: e_ly.size, count: e_len }
820                 };
821
822                 tcx.intern_layout(LayoutS {
823                     variants: Variants::Single { index: VariantIdx::new(0) },
824                     fields,
825                     abi: Abi::Vector { element: e_abi, count: e_len },
826                     largest_niche: e_ly.largest_niche,
827                     size,
828                     align,
829                 })
830             }
831
832             // ADTs.
833             ty::Adt(def, substs) => {
834                 // Cache the field layouts.
835                 let variants = def
836                     .variants()
837                     .iter()
838                     .map(|v| {
839                         v.fields
840                             .iter()
841                             .map(|field| self.layout_of(field.ty(tcx, substs)))
842                             .collect::<Result<Vec<_>, _>>()
843                     })
844                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
845
846                 if def.is_union() {
847                     if def.repr().pack.is_some() && def.repr().align.is_some() {
848                         self.tcx.sess.delay_span_bug(
849                             tcx.def_span(def.did()),
850                             "union cannot be packed and aligned",
851                         );
852                         return Err(LayoutError::Unknown(ty));
853                     }
854
855                     let mut align =
856                         if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
857
858                     if let Some(repr_align) = def.repr().align {
859                         align = align.max(AbiAndPrefAlign::new(repr_align));
860                     }
861
862                     let optimize = !def.repr().inhibit_union_abi_opt();
863                     let mut size = Size::ZERO;
864                     let mut abi = Abi::Aggregate { sized: true };
865                     let index = VariantIdx::new(0);
866                     for field in &variants[index] {
867                         assert!(!field.is_unsized());
868                         align = align.max(field.align);
869
870                         // If all non-ZST fields have the same ABI, forward this ABI
871                         if optimize && !field.is_zst() {
872                             // Discard valid range information and allow undef
873                             let field_abi = match field.abi {
874                                 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
875                                 Abi::ScalarPair(x, y) => {
876                                     Abi::ScalarPair(x.to_union(), y.to_union())
877                                 }
878                                 Abi::Vector { element: x, count } => {
879                                     Abi::Vector { element: x.to_union(), count }
880                                 }
881                                 Abi::Uninhabited | Abi::Aggregate { .. } => {
882                                     Abi::Aggregate { sized: true }
883                                 }
884                             };
885
886                             if size == Size::ZERO {
887                                 // first non ZST: initialize 'abi'
888                                 abi = field_abi;
889                             } else if abi != field_abi {
890                                 // different fields have different ABI: reset to Aggregate
891                                 abi = Abi::Aggregate { sized: true };
892                             }
893                         }
894
895                         size = cmp::max(size, field.size);
896                     }
897
898                     if let Some(pack) = def.repr().pack {
899                         align = align.min(AbiAndPrefAlign::new(pack));
900                     }
901
902                     return Ok(tcx.intern_layout(LayoutS {
903                         variants: Variants::Single { index },
904                         fields: FieldsShape::Union(
905                             NonZeroUsize::new(variants[index].len())
906                                 .ok_or(LayoutError::Unknown(ty))?,
907                         ),
908                         abi,
909                         largest_niche: None,
910                         align,
911                         size: size.align_to(align.abi),
912                     }));
913                 }
914
915                 // A variant is absent if it's uninhabited and only has ZST fields.
916                 // Present uninhabited variants only require space for their fields,
917                 // but *not* an encoding of the discriminant (e.g., a tag value).
918                 // See issue #49298 for more details on the need to leave space
919                 // for non-ZST uninhabited data (mostly partial initialization).
920                 let absent = |fields: &[TyAndLayout<'_>]| {
921                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
922                     let is_zst = fields.iter().all(|f| f.is_zst());
923                     uninhabited && is_zst
924                 };
925                 let (present_first, present_second) = {
926                     let mut present_variants = variants
927                         .iter_enumerated()
928                         .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
929                     (present_variants.next(), present_variants.next())
930                 };
931                 let present_first = match present_first {
932                     Some(present_first) => present_first,
933                     // Uninhabited because it has no variants, or only absent ones.
934                     None if def.is_enum() => {
935                         return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
936                     }
937                     // If it's a struct, still compute a layout so that we can still compute the
938                     // field offsets.
939                     None => VariantIdx::new(0),
940                 };
941
942                 let is_struct = !def.is_enum() ||
943                     // Only one variant is present.
944                     (present_second.is_none() &&
945                     // Representation optimizations are allowed.
946                     !def.repr().inhibit_enum_layout_opt());
947                 if is_struct {
948                     // Struct, or univariant enum equivalent to a struct.
949                     // (Typechecking will reject discriminant-sizing attrs.)
950
951                     let v = present_first;
952                     let kind = if def.is_enum() || variants[v].is_empty() {
953                         StructKind::AlwaysSized
954                     } else {
955                         let param_env = tcx.param_env(def.did());
956                         let last_field = def.variant(v).fields.last().unwrap();
957                         let always_sized =
958                             tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
959                         if !always_sized {
960                             StructKind::MaybeUnsized
961                         } else {
962                             StructKind::AlwaysSized
963                         }
964                     };
965
966                     let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
967                     st.variants = Variants::Single { index: v };
968
969                     if def.is_unsafe_cell() {
970                         let hide_niches = |scalar: &mut _| match scalar {
971                             Scalar::Initialized { value, valid_range } => {
972                                 *valid_range = WrappingRange::full(value.size(dl))
973                             }
974                             // Already doesn't have any niches
975                             Scalar::Union { .. } => {}
976                         };
977                         match &mut st.abi {
978                             Abi::Uninhabited => {}
979                             Abi::Scalar(scalar) => hide_niches(scalar),
980                             Abi::ScalarPair(a, b) => {
981                                 hide_niches(a);
982                                 hide_niches(b);
983                             }
984                             Abi::Vector { element, count: _ } => hide_niches(element),
985                             Abi::Aggregate { sized: _ } => {}
986                         }
987                         st.largest_niche = None;
988                         return Ok(tcx.intern_layout(st));
989                     }
990
991                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
992                     match st.abi {
993                         Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
994                             // the asserts ensure that we are not using the
995                             // `#[rustc_layout_scalar_valid_range(n)]`
996                             // attribute to widen the range of anything as that would probably
997                             // result in UB somewhere
998                             // FIXME(eddyb) the asserts are probably not needed,
999                             // as larger validity ranges would result in missed
1000                             // optimizations, *not* wrongly assuming the inner
1001                             // value is valid. e.g. unions enlarge validity ranges,
1002                             // because the values may be uninitialized.
1003                             if let Bound::Included(start) = start {
1004                                 // FIXME(eddyb) this might be incorrect - it doesn't
1005                                 // account for wrap-around (end < start) ranges.
1006                                 let valid_range = scalar.valid_range_mut();
1007                                 assert!(valid_range.start <= start);
1008                                 valid_range.start = start;
1009                             }
1010                             if let Bound::Included(end) = end {
1011                                 // FIXME(eddyb) this might be incorrect - it doesn't
1012                                 // account for wrap-around (end < start) ranges.
1013                                 let valid_range = scalar.valid_range_mut();
1014                                 assert!(valid_range.end >= end);
1015                                 valid_range.end = end;
1016                             }
1017
1018                             // Update `largest_niche` if we have introduced a larger niche.
1019                             let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
1020                             if let Some(niche) = niche {
1021                                 match st.largest_niche {
1022                                     Some(largest_niche) => {
1023                                         // Replace the existing niche even if they're equal,
1024                                         // because this one is at a lower offset.
1025                                         if largest_niche.available(dl) <= niche.available(dl) {
1026                                             st.largest_niche = Some(niche);
1027                                         }
1028                                     }
1029                                     None => st.largest_niche = Some(niche),
1030                                 }
1031                             }
1032                         }
1033                         _ => assert!(
1034                             start == Bound::Unbounded && end == Bound::Unbounded,
1035                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1036                             def,
1037                             st,
1038                         ),
1039                     }
1040
1041                     return Ok(tcx.intern_layout(st));
1042                 }
1043
1044                 // At this point, we have handled all unions and
1045                 // structs. (We have also handled univariant enums
1046                 // that allow representation optimization.)
1047                 assert!(def.is_enum());
1048
1049                 // Until we've decided whether to use the tagged or
1050                 // niche filling LayoutS, we don't want to intern the
1051                 // variant layouts, so we can't store them in the
1052                 // overall LayoutS. Store the overall LayoutS
1053                 // and the variant LayoutSs here until then.
                /// Temporary, uninterned layout data for a multi-variant enum,
                /// held until we choose between the tagged and niche-filling
                /// representations (see the comment above).
                struct TmpLayout<'tcx> {
                    // The candidate layout for the enum as a whole.
                    layout: LayoutS<'tcx>,
                    // One uninterned layout per variant, indexed by `VariantIdx`.
                    variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
                }
1058
1059                 let calculate_niche_filling_layout =
1060                     || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1061                         // The current code for niche-filling relies on variant indices
1062                         // instead of actual discriminants, so enums with
1063                         // explicit discriminants (RFC #2363) would misbehave.
1064                         if def.repr().inhibit_enum_layout_opt()
1065                             || def
1066                                 .variants()
1067                                 .iter_enumerated()
1068                                 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
1069                         {
1070                             return Ok(None);
1071                         }
1072
1073                         if variants.len() < 2 {
1074                             return Ok(None);
1075                         }
1076
1077                         let mut align = dl.aggregate_align;
1078                         let mut variant_layouts = variants
1079                             .iter_enumerated()
1080                             .map(|(j, v)| {
1081                                 let mut st = self.univariant_uninterned(
1082                                     ty,
1083                                     v,
1084                                     &def.repr(),
1085                                     StructKind::AlwaysSized,
1086                                 )?;
1087                                 st.variants = Variants::Single { index: j };
1088
1089                                 align = align.max(st.align);
1090
1091                                 Ok(st)
1092                             })
1093                             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1094
1095                         let largest_variant_index = match variant_layouts
1096                             .iter_enumerated()
1097                             .max_by_key(|(_i, layout)| layout.size.bytes())
1098                             .map(|(i, _layout)| i)
1099                         {
1100                             None => return Ok(None),
1101                             Some(i) => i,
1102                         };
1103
1104                         let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1105                         let needs_disc = |index: VariantIdx| {
1106                             index != largest_variant_index && !absent(&variants[index])
1107                         };
1108                         let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1109                             ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1110
1111                         let count = niche_variants.size_hint().1.unwrap() as u128;
1112
1113                         // Find the field with the largest niche
1114                         let (field_index, niche, (niche_start, niche_scalar)) = match variants
1115                             [largest_variant_index]
1116                             .iter()
1117                             .enumerate()
1118                             .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1119                             .max_by_key(|(_, niche)| niche.available(dl))
1120                             .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1121                         {
1122                             None => return Ok(None),
1123                             Some(x) => x,
1124                         };
1125
1126                         let niche_offset = niche.offset
1127                             + variant_layouts[largest_variant_index].fields.offset(field_index);
1128                         let niche_size = niche.value.size(dl);
1129                         let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1130
1131                         let all_variants_fit =
1132                             variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1133                                 if i == largest_variant_index {
1134                                     return true;
1135                                 }
1136
1137                                 layout.largest_niche = None;
1138
1139                                 if layout.size <= niche_offset {
1140                                     // This variant will fit before the niche.
1141                                     return true;
1142                                 }
1143
1144                                 // Determine if it'll fit after the niche.
1145                                 let this_align = layout.align.abi;
1146                                 let this_offset = (niche_offset + niche_size).align_to(this_align);
1147
1148                                 if this_offset + layout.size > size {
1149                                     return false;
1150                                 }
1151
1152                                 // It'll fit, but we need to make some adjustments.
1153                                 match layout.fields {
1154                                     FieldsShape::Arbitrary { ref mut offsets, .. } => {
1155                                         for (j, offset) in offsets.iter_mut().enumerate() {
1156                                             if !variants[i][j].is_zst() {
1157                                                 *offset += this_offset;
1158                                             }
1159                                         }
1160                                     }
1161                                     _ => {
1162                                         panic!("Layout of fields should be Arbitrary for variants")
1163                                     }
1164                                 }
1165
1166                                 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1167                                 if !layout.abi.is_uninhabited() {
1168                                     layout.abi = Abi::Aggregate { sized: true };
1169                                 }
1170                                 layout.size += this_offset;
1171
1172                                 true
1173                             });
1174
1175                         if !all_variants_fit {
1176                             return Ok(None);
1177                         }
1178
1179                         let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1180
1181                         let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1182                             i == largest_variant_index || layout.size == Size::ZERO
1183                         });
1184                         let same_size = size == variant_layouts[largest_variant_index].size;
1185                         let same_align = align == variant_layouts[largest_variant_index].align;
1186
1187                         let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1188                             Abi::Uninhabited
1189                         } else if same_size && same_align && others_zst {
1190                             match variant_layouts[largest_variant_index].abi {
1191                                 // When the total alignment and size match, we can use the
1192                                 // same ABI as the scalar variant with the reserved niche.
1193                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1194                                 Abi::ScalarPair(first, second) => {
1195                                     // Only the niche is guaranteed to be initialised,
1196                                     // so use union layouts for the other primitive.
1197                                     if niche_offset == Size::ZERO {
1198                                         Abi::ScalarPair(niche_scalar, second.to_union())
1199                                     } else {
1200                                         Abi::ScalarPair(first.to_union(), niche_scalar)
1201                                     }
1202                                 }
1203                                 _ => Abi::Aggregate { sized: true },
1204                             }
1205                         } else {
1206                             Abi::Aggregate { sized: true }
1207                         };
1208
1209                         let layout = LayoutS {
1210                             variants: Variants::Multiple {
1211                                 tag: niche_scalar,
1212                                 tag_encoding: TagEncoding::Niche {
1213                                     untagged_variant: largest_variant_index,
1214                                     niche_variants,
1215                                     niche_start,
1216                                 },
1217                                 tag_field: 0,
1218                                 variants: IndexVec::new(),
1219                             },
1220                             fields: FieldsShape::Arbitrary {
1221                                 offsets: vec![niche_offset],
1222                                 memory_index: vec![0],
1223                             },
1224                             abi,
1225                             largest_niche,
1226                             size,
1227                             align,
1228                         };
1229
1230                         Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1231                     };
1232
1233                 let niche_filling_layout = calculate_niche_filling_layout()?;
1234
1235                 let (mut min, mut max) = (i128::MAX, i128::MIN);
1236                 let discr_type = def.repr().discr_type();
1237                 let bits = Integer::from_attr(self, discr_type).size().bits();
1238                 for (i, discr) in def.discriminants(tcx) {
1239                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1240                         continue;
1241                     }
1242                     let mut x = discr.val as i128;
1243                     if discr_type.is_signed() {
1244                         // sign extend the raw representation to be an i128
1245                         x = (x << (128 - bits)) >> (128 - bits);
1246                     }
1247                     if x < min {
1248                         min = x;
1249                     }
1250                     if x > max {
1251                         max = x;
1252                     }
1253                 }
1254                 // We might have no inhabited variants, so pretend there's at least one.
1255                 if (min, max) == (i128::MAX, i128::MIN) {
1256                     min = 0;
1257                     max = 0;
1258                 }
1259                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1260                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1261
1262                 let mut align = dl.aggregate_align;
1263                 let mut size = Size::ZERO;
1264
1265                 // We're interested in the smallest alignment, so start large.
1266                 let mut start_align = Align::from_bytes(256).unwrap();
1267                 assert_eq!(Integer::for_align(dl, start_align), None);
1268
1269                 // repr(C) on an enum tells us to make a (tag, union) layout,
1270                 // so we need to grow the prefix alignment to be at least
1271                 // the alignment of the union. (This value is used both for
1272                 // determining the alignment of the overall enum, and the
1273                 // determining the alignment of the payload after the tag.)
1274                 let mut prefix_align = min_ity.align(dl).abi;
1275                 if def.repr().c() {
1276                     for fields in &variants {
1277                         for field in fields {
1278                             prefix_align = prefix_align.max(field.align.abi);
1279                         }
1280                     }
1281                 }
1282
1283                 // Create the set of structs that represent each variant.
1284                 let mut layout_variants = variants
1285                     .iter_enumerated()
1286                     .map(|(i, field_layouts)| {
1287                         let mut st = self.univariant_uninterned(
1288                             ty,
1289                             &field_layouts,
1290                             &def.repr(),
1291                             StructKind::Prefixed(min_ity.size(), prefix_align),
1292                         )?;
1293                         st.variants = Variants::Single { index: i };
1294                         // Find the first field we can't move later
1295                         // to make room for a larger discriminant.
1296                         for field in
1297                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1298                         {
1299                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1300                                 start_align = start_align.min(field.align.abi);
1301                                 break;
1302                             }
1303                         }
1304                         size = cmp::max(size, st.size);
1305                         align = align.max(st.align);
1306                         Ok(st)
1307                     })
1308                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1309
1310                 // Align the maximum variant size to the largest alignment.
1311                 size = size.align_to(align.abi);
1312
1313                 if size.bytes() >= dl.obj_size_bound() {
1314                     return Err(LayoutError::SizeOverflow(ty));
1315                 }
1316
1317                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1318                 if typeck_ity < min_ity {
1319                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1320                     // some reason at this point (based on values discriminant can take on). Mostly
1321                     // because this discriminant will be loaded, and then stored into variable of
1322                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1323                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1324                     // discriminant values. That would be a bug, because then, in codegen, in order
1325                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1326                     // space necessary to represent would have to be discarded (or layout is wrong
1327                     // on thinking it needs 16 bits)
1328                     bug!(
1329                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1330                         min_ity,
1331                         typeck_ity
1332                     );
1333                     // However, it is fine to make discr type however large (as an optimisation)
1335                     // after this point – we’ll just truncate the value we load in codegen.
1335                 }
1336
1337                 // Check to see if we should use a different type for the
1338                 // discriminant. We can safely use a type with the same size
1339                 // as the alignment of the first field of each variant.
1340                 // We increase the size of the discriminant to avoid LLVM copying
1341                 // padding when it doesn't need to. This normally causes unaligned
1342                 // load/stores and excessive memcpy/memset operations. By using a
1343                 // bigger integer size, LLVM can be sure about its contents and
1344                 // won't be so conservative.
1345
1346                 // Use the initial field alignment
1347                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1348                     min_ity
1349                 } else {
1350                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1351                 };
1352
1353                 // If the alignment is not larger than the chosen discriminant size,
1354                 // don't use the alignment as the final size.
1355                 if ity <= min_ity {
1356                     ity = min_ity;
1357                 } else {
1358                     // Patch up the variants' first few fields.
1359                     let old_ity_size = min_ity.size();
1360                     let new_ity_size = ity.size();
1361                     for variant in &mut layout_variants {
1362                         match variant.fields {
1363                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1364                                 for i in offsets {
1365                                     if *i <= old_ity_size {
1366                                         assert_eq!(*i, old_ity_size);
1367                                         *i = new_ity_size;
1368                                     }
1369                                 }
1370                                 // We might be making the struct larger.
1371                                 if variant.size <= old_ity_size {
1372                                     variant.size = new_ity_size;
1373                                 }
1374                             }
1375                             _ => bug!(),
1376                         }
1377                     }
1378                 }
1379
1380                 let tag_mask = ity.size().unsigned_int_max();
1381                 let tag = Scalar::Initialized {
1382                     value: Int(ity, signed),
1383                     valid_range: WrappingRange {
1384                         start: (min as u128 & tag_mask),
1385                         end: (max as u128 & tag_mask),
1386                     },
1387                 };
1388                 let mut abi = Abi::Aggregate { sized: true };
1389
1390                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1391                     abi = Abi::Uninhabited;
1392                 } else if tag.size(dl) == size {
1393                     // Make sure we only use scalar layout when the enum is entirely its
1394                     // own tag (i.e. it has no padding nor any non-ZST variant fields).
1395                     abi = Abi::Scalar(tag);
1396                 } else {
1397                     // Try to use a ScalarPair for all tagged enums.
1398                     let mut common_prim = None;
1399                     let mut common_prim_initialized_in_all_variants = true;
1400                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1401                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1402                             bug!();
1403                         };
1404                         let mut fields =
1405                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1406                         let (field, offset) = match (fields.next(), fields.next()) {
1407                             (None, None) => {
1408                                 common_prim_initialized_in_all_variants = false;
1409                                 continue;
1410                             }
1411                             (Some(pair), None) => pair,
1412                             _ => {
1413                                 common_prim = None;
1414                                 break;
1415                             }
1416                         };
1417                         let prim = match field.abi {
1418                             Abi::Scalar(scalar) => {
1419                                 common_prim_initialized_in_all_variants &=
1420                                     matches!(scalar, Scalar::Initialized { .. });
1421                                 scalar.primitive()
1422                             }
1423                             _ => {
1424                                 common_prim = None;
1425                                 break;
1426                             }
1427                         };
1428                         if let Some(pair) = common_prim {
1429                             // This is pretty conservative. We could go fancier
1430                             // by conflating things like i32 and u32, or even
1431                             // realising that (u8, u8) could just cohabit with
1432                             // u16 or even u32.
1433                             if pair != (prim, offset) {
1434                                 common_prim = None;
1435                                 break;
1436                             }
1437                         } else {
1438                             common_prim = Some((prim, offset));
1439                         }
1440                     }
1441                     if let Some((prim, offset)) = common_prim {
1442                         let prim_scalar = if common_prim_initialized_in_all_variants {
1443                             scalar_unit(prim)
1444                         } else {
1445                             // Common prim might be uninit.
1446                             Scalar::Union { value: prim }
1447                         };
1448                         let pair = self.scalar_pair(tag, prim_scalar);
1449                         let pair_offsets = match pair.fields {
1450                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1451                                 assert_eq!(memory_index, &[0, 1]);
1452                                 offsets
1453                             }
1454                             _ => bug!(),
1455                         };
1456                         if pair_offsets[0] == Size::ZERO
1457                             && pair_offsets[1] == *offset
1458                             && align == pair.align
1459                             && size == pair.size
1460                         {
1461                             // We can use `ScalarPair` only when it matches our
1462                             // already computed layout (including `#[repr(C)]`).
1463                             abi = pair.abi;
1464                         }
1465                     }
1466                 }
1467
1468                 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1469                 // variants to ensure they are consistent. This is because a downcast is
1470                 // semantically a NOP, and thus should not affect layout.
1471                 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1472                     for variant in &mut layout_variants {
1473                         // We only do this for variants with fields; the others are not accessed anyway.
1474                         // Also do not overwrite any already existing "clever" ABIs.
1475                         if variant.fields.count() > 0
1476                             && matches!(variant.abi, Abi::Aggregate { .. })
1477                         {
1478                             variant.abi = abi;
1479                             // Also need to bump up the size and alignment, so that the entire value fits in here.
1480                             variant.size = cmp::max(variant.size, size);
1481                             variant.align.abi = cmp::max(variant.align.abi, align.abi);
1482                         }
1483                     }
1484                 }
1485
1486                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1487
1488                 let tagged_layout = LayoutS {
1489                     variants: Variants::Multiple {
1490                         tag,
1491                         tag_encoding: TagEncoding::Direct,
1492                         tag_field: 0,
1493                         variants: IndexVec::new(),
1494                     },
1495                     fields: FieldsShape::Arbitrary {
1496                         offsets: vec![Size::ZERO],
1497                         memory_index: vec![0],
1498                     },
1499                     largest_niche,
1500                     abi,
1501                     align,
1502                     size,
1503                 };
1504
1505                 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1506
1507                 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1508                     (tl, Some(nl)) => {
1509                         // Pick the smaller layout; otherwise,
1510                         // pick the layout with the larger niche; otherwise,
1511                         // pick tagged as it has simpler codegen.
1512                         use Ordering::*;
1513                         let niche_size = |tmp_l: &TmpLayout<'_>| {
1514                             tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1515                         };
1516                         match (
1517                             tl.layout.size.cmp(&nl.layout.size),
1518                             niche_size(&tl).cmp(&niche_size(&nl)),
1519                         ) {
1520                             (Greater, _) => nl,
1521                             (Equal, Less) => nl,
1522                             _ => tl,
1523                         }
1524                     }
1525                     (tl, None) => tl,
1526                 };
1527
1528                 // Now we can intern the variant layouts and store them in the enum layout.
1529                 best_layout.layout.variants = match best_layout.layout.variants {
1530                     Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1531                         tag,
1532                         tag_encoding,
1533                         tag_field,
1534                         variants: best_layout
1535                             .variants
1536                             .into_iter()
1537                             .map(|layout| tcx.intern_layout(layout))
1538                             .collect(),
1539                     },
1540                     _ => bug!(),
1541                 };
1542
1543                 tcx.intern_layout(best_layout.layout)
1544             }
1545
1546             // Types with no meaningful known layout.
1547             ty::Projection(_) | ty::Opaque(..) => {
1548                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1549                 // if that was possible, so there's no reason to try again here.
1550                 return Err(LayoutError::Unknown(ty));
1551             }
1552
1553             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1554                 bug!("Layout::compute: unexpected type `{}`", ty)
1555             }
1556
1557             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1558                 return Err(LayoutError::Unknown(ty));
1559             }
1560         })
1561     }
1562 }
1563
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant; still a candidate for overlap.
    Unassigned,
    /// Seen in exactly one variant so far; eligible for overlap and laid
    /// out inside that variant's own layout.
    Assigned(VariantIdx),
    /// Stored in the shared prefix instead of any variant. The payload,
    /// once filled in, is the local's position among the promoted
    /// (prefix) fields; it is `None` until that order is decided.
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}
1572
1573 // When laying out generators, we divide our saved local fields into two
1574 // categories: overlap-eligible and overlap-ineligible.
1575 //
1576 // Those fields which are ineligible for overlap go in a "prefix" at the
1577 // beginning of the layout, and always have space reserved for them.
1578 //
1579 // Overlap-eligible fields are only assigned to one variant, so we lay
1580 // those fields out for each variant and put them right after the
1581 // prefix.
1582 //
1583 // Finally, in the layout details, we point to the fields from the
1584 // variants they are assigned to. It is possible for some fields to be
1585 // included in multiple variants. No field ever "moves around" in the
1586 // layout; its offset is always the same.
1587 //
1588 // Also included in the layout are the upvars and the discriminant.
1589 // These are included as fields on the "outer" layout; they are not part
1590 // of any variant.
1591 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1592     /// Compute the eligibility and assignment of each local.
    /// Compute the eligibility and assignment of each local.
    ///
    /// Returns the set of overlap-ineligible locals (these get promoted to
    /// the generator's prefix) together with a per-local assignment: every
    /// local ends up either `Assigned(variant)` or `Ineligible(Some(idx))`,
    /// where `idx` is the local's position among the promoted prefix fields.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    // Already demoted; nothing more to do.
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                // We demote whichever local has more conflicts, since it blocks
                // overlap for the larger number of peers.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        // The prefix order is simply the BitSet's iteration order over the
        // ineligible locals.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
1692
1693     /// Compute the full generator layout.
    /// Compute the full generator layout.
    ///
    /// The layout is a shared "prefix" — upvars, the discriminant tag, and all
    /// overlap-ineligible saved locals promoted to fixed offsets — followed by
    /// per-variant layouts of the overlap-eligible locals assigned to each
    /// variant (see `generator_saved_local_eligibility`).
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;
        // Saved-local types in `info` are generic; substitute the generator's
        // actual substs before computing their layouts.
        let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);

        let Some(info) = tcx.generator_layout(def_id) else {
            return Err(LayoutError::Unknown(ty));
        };
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        // The tag field goes right after the upvars (prefix tys).
        let tag_index = substs.as_generator().prefix_tys().count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let tag = Scalar::Initialized {
            value: Primitive::Int(discr_int, false),
            valid_range: WrappingRange { start: 0, end: max_discr },
        };
        let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

        // Promoted locals are wrapped in `MaybeUninit` since a given variant
        // may leave them uninitialized.
        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        // Prefix field order: upvars, then the tag, then the promoted locals.
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys()
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(tag_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // GeneratorLayout.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldsShape::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (tag_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields =
                    FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                // Lay out the variant's own fields after the (fixed) prefix.
                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                    bug!();
                };

                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                // Variant-local field: take the next computed slot;
                                // its memory index comes after all promoted fields.
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                // Promoted field: reuse its fixed slot in the prefix.
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldsShape::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(tcx.intern_layout(variant))
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi =
            if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

        let layout = tcx.intern_layout(LayoutS {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: tag_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
1887
1888     /// This is invoked by the `layout_of` query to record the final
1889     /// layout of each type.
1890     #[inline(always)]
1891     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1892         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1893         // for dumping later.
1894         if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1895             self.record_layout_for_printing_outlined(layout)
1896         }
1897     }
1898
1899     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1900         // Ignore layouts that are done with non-empty environments or
1901         // non-monomorphic layouts, as the user only wants to see the stuff
1902         // resulting from the final codegen session.
1903         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1904             return;
1905         }
1906
1907         // (delay format until we actually need it)
1908         let record = |kind, packed, opt_discr_size, variants| {
1909             let type_desc = format!("{:?}", layout.ty);
1910             self.tcx.sess.code_stats.record_type_size(
1911                 kind,
1912                 type_desc,
1913                 layout.align.abi,
1914                 layout.size,
1915                 packed,
1916                 opt_discr_size,
1917                 variants,
1918             );
1919         };
1920
1921         let adt_def = match *layout.ty.kind() {
1922             ty::Adt(ref adt_def, _) => {
1923                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1924                 adt_def
1925             }
1926
1927             ty::Closure(..) => {
1928                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1929                 record(DataTypeKind::Closure, false, None, vec![]);
1930                 return;
1931             }
1932
1933             _ => {
1934                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1935                 return;
1936             }
1937         };
1938
1939         let adt_kind = adt_def.adt_kind();
1940         let adt_packed = adt_def.repr().pack.is_some();
1941
1942         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1943             let mut min_size = Size::ZERO;
1944             let field_info: Vec<_> = flds
1945                 .iter()
1946                 .enumerate()
1947                 .map(|(i, &name)| {
1948                     let field_layout = layout.field(self, i);
1949                     let offset = layout.fields.offset(i);
1950                     let field_end = offset + field_layout.size;
1951                     if min_size < field_end {
1952                         min_size = field_end;
1953                     }
1954                     FieldInfo {
1955                         name,
1956                         offset: offset.bytes(),
1957                         size: field_layout.size.bytes(),
1958                         align: field_layout.align.abi.bytes(),
1959                     }
1960                 })
1961                 .collect();
1962
1963             VariantInfo {
1964                 name: n,
1965                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1966                 align: layout.align.abi.bytes(),
1967                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1968                 fields: field_info,
1969             }
1970         };
1971
1972         match layout.variants {
1973             Variants::Single { index } => {
1974                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1975                     debug!(
1976                         "print-type-size `{:#?}` variant {}",
1977                         layout,
1978                         adt_def.variant(index).name
1979                     );
1980                     let variant_def = &adt_def.variant(index);
1981                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1982                     record(
1983                         adt_kind.into(),
1984                         adt_packed,
1985                         None,
1986                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1987                     );
1988                 } else {
1989                     // (This case arises for *empty* enums; so give it
1990                     // zero variants.)
1991                     record(adt_kind.into(), adt_packed, None, vec![]);
1992                 }
1993             }
1994
1995             Variants::Multiple { tag, ref tag_encoding, .. } => {
1996                 debug!(
1997                     "print-type-size `{:#?}` adt general variants def {}",
1998                     layout.ty,
1999                     adt_def.variants().len()
2000                 );
2001                 let variant_infos: Vec<_> = adt_def
2002                     .variants()
2003                     .iter_enumerated()
2004                     .map(|(i, variant_def)| {
2005                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2006                         build_variant_info(
2007                             Some(variant_def.name),
2008                             &fields,
2009                             layout.for_variant(self, i),
2010                         )
2011                     })
2012                     .collect();
2013                 record(
2014                     adt_kind.into(),
2015                     adt_packed,
2016                     match tag_encoding {
2017                         TagEncoding::Direct => Some(tag.size(self)),
2018                         _ => None,
2019                     },
2020                     variant_infos,
2021                 );
2022             }
2023         }
2024     }
2025 }
2026
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
2046
impl<'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`: either its statically known size,
    /// or — when full layout computation fails — the shape of a
    /// (potentially-fat) pointer whose size depends only on a type parameter
    /// or projection. Returns the original `LayoutError` if neither applies.
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match *ty.kind() {
            // References and raw pointers: thin or fat, their size is
            // determined by the unsized tail of the pointee (if any).
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
                    }
                    // `layout_of` only fails for pointers when the tail is
                    // generic; anything else indicates a compiler bug.
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(ptr_skeleton))` if the variant is exactly
                // one pointer plus zero-sized fields, `Ok(None)` if it is
                // entirely zero-sized, `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields =
                        def.variant(i).fields.iter().map(|field| {
                            SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                        });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                // More than one pointer field: not a newtype.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants().len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // excluding 0 also makes the pointer non-null.
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did()) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant is a
                // non-null pointer, the other is entirely zero-sized.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The resulting enum admits the null value, so the
                        // combined skeleton is a nullable pointer.
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            // Try harder on projections/opaques: normalize and retry once
            // (guarding against non-progress to avoid infinite recursion).
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    /// Whether two skeletons are guaranteed to describe equally-sized types
    /// (equal known sizes, or pointers with the same metadata-determining tail).
    pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
2169
/// Context trait giving access to the [`TyCtxt`] (and, via the supertrait,
/// to the target's data layout).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}
2173
/// Context trait giving access to the [`ty::ParamEnv`] used for layout queries.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
2177
// `TyCtxt` exposes the target data layout directly.
impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
2184
// `TyCtxt` exposes the target spec via its session.
impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
    fn target_spec(&self) -> &Target {
        &self.sess.target
    }
}
2190
// `TyCtxt` trivially provides itself (it is `Copy`).
impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}
2197
// `TyCtxtAt` derefs to `TyCtxt`, so the same accessors apply.
impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
2204
// Target spec for a span-carrying `TyCtxtAt`, via the session.
impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
    fn target_spec(&self) -> &Target {
        &self.sess.target
    }
}
2210
// Double deref: `TyCtxtAt` -> `TyCtxt` (dropping the attached span).
impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        **self
    }
}
2217
// `LayoutCx` carries the `ParamEnv` it performs layout computations under.
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}
2223
// Forward data-layout access to the wrapped context.
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
2229
// Forward target-spec access to the wrapped context.
impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
    fn target_spec(&self) -> &Target {
        self.tcx.target_spec()
    }
}
2235
// Forward `TyCtxt` access to the wrapped context.
impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
2241
/// Abstracts over the two shapes a layout query result may take: a bare
/// value (for contexts that turn `LayoutError`s into fatal errors/ICEs) or
/// a `Result` (for contexts that propagate them to the caller).
pub trait MaybeResult<T> {
    /// The error type of the fallible view; uninhabited (`!`) when the
    /// implementor can never fail.
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}
2248
// The infallible case: a bare `T` acts as a `MaybeResult<T>` with the
// uninhabited never type `!` as its error.
impl<T> MaybeResult<T> for T {
    type Error = !;

    // `Err(!)` is uninhabited, so the `Ok` pattern in argument position is
    // irrefutable here.
    fn from(Ok(x): Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}
2259
2260 impl<T, E> MaybeResult<T> for Result<T, E> {
2261     type Error = E;
2262
2263     fn from(x: Result<T, Self::Error>) -> Self {
2264         x
2265     }
2266     fn to_result(self) -> Result<T, Self::Error> {
2267         self
2268     }
2269 }
2270
2271 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2272
/// Trait for contexts that want to be able to compute layouts of types.
/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
    /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
    /// returned from `layout_of` (see also `handle_layout_err`).
    type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;

    /// `Span` to use for `tcx.at(span)`, from `layout_of`.
    // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        DUMMY_SP
    }

    /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
    /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
    /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
    /// (and any `LayoutError`s are turned into fatal errors or ICEs).
    fn handle_layout_err(
        &self,
        err: LayoutError<'tcx>,
        span: Span,
        ty: Ty<'tcx>,
    ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
}
2301
2302 /// Blanket extension trait for contexts that can compute layouts of types.
2303 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2304     /// Computes the layout of a type. Note that this implicitly
2305     /// executes in "reveal all" mode, and will normalize the input type.
2306     #[inline]
2307     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2308         self.spanned_layout_of(ty, DUMMY_SP)
2309     }
2310
2311     /// Computes the layout of a type, at `span`. Note that this implicitly
2312     /// executes in "reveal all" mode, and will normalize the input type.
2313     // FIXME(eddyb) avoid passing information like this, and instead add more
2314     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2315     #[inline]
2316     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2317         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2318         let tcx = self.tcx().at(span);
2319
2320         MaybeResult::from(
2321             tcx.layout_of(self.param_env().and(ty))
2322                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2323         )
2324     }
2325 }
2326
2327 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2328
impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Errors are propagated to the caller unchanged.
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2337
impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Use the span carried by the `TyCtxtAt` for layout queries.
    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        self.tcx.span
    }

    /// Errors are propagated to the caller unchanged.
    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2351
2352 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2353 where
2354     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2355 {
    /// Returns the layout of variant `variant_index` of `this`.
    ///
    /// For a `Variants::Single` layout whose index matches, this is the enum
    /// layout itself; for a mismatching index it synthesizes an uninhabited
    /// layout (the variant is absent from the computed layout); for
    /// `Variants::Multiple` it picks the precomputed per-variant layout.
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                let tcx = cx.tcx();
                let param_env = cx.param_env();

                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants().is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variant(variant_index).fields.len(),
                    _ => bug!(),
                };
                // Synthesize an uninhabited, zero-sized layout for the
                // requested (absent) variant, preserving its field count.
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => variants[variant_index],
        };

        // Whatever branch was taken, the result must describe exactly the
        // requested single variant.
        assert_eq!(*layout.variants(), Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }
2407
    /// Returns the type-and-layout of field `i` of `this`.
    ///
    /// The inner helper first resolves the field to either a plain type
    /// (whose layout is then computed via `layout_of`) or a ready-made
    /// `TyAndLayout` (used for pointer data fields and enum/generator tags,
    /// which have no corresponding source-level type).
    fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
        // Intermediate result: either just a field type, or a full layout.
        enum TyMaybeWithLayout<'tcx> {
            Ty(Ty<'tcx>),
            TyAndLayout(TyAndLayout<'tcx>),
        }

        fn field_ty_or_layout<'tcx>(
            this: TyAndLayout<'tcx>,
            cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
            i: usize,
        ) -> TyMaybeWithLayout<'tcx> {
            let tcx = cx.tcx();
            // Builds a layout for a discriminant/tag scalar.
            let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
                TyAndLayout {
                    layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
                    ty: tag.primitive().to_ty(tcx),
                }
            };

            match *this.ty.kind() {
                // Scalars and opaque types have no fields to ask for.
                ty::Bool
                | ty::Char
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_)
                | ty::FnPtr(_)
                | ty::Never
                | ty::FnDef(..)
                | ty::GeneratorWitness(..)
                | ty::Foreign(..)
                | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),

                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                    if i == 0 {
                        let nil = tcx.mk_unit();
                        let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
                            tcx.mk_mut_ptr(nil)
                        } else {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                        };

                        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
                        // the `Result` should always work because the type is
                        // always either `*mut ()` or `&'static mut ()`.
                        return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
                            ty: this.ty,
                            ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
                        });
                    }

                    // Field 1 is the pointer metadata, determined by the
                    // unsized tail: a `usize` length for slices/str, a vtable
                    // reference for trait objects.
                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _) => {
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ))
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_tup(&[
                                tcx.mk_array(tcx.types.usize, 3),
                                tcx.mk_array(Option<fn()>),
                            ])
                            */
                        }
                        _ => bug!("TyAndLayout::field({:?}): not applicable", this),
                    }
                }

                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),

                // Tuples, generators and closures.
                // A closure's fields are its captured upvars; delegate to the
                // layout of the tupled-upvars type.
                ty::Closure(_, ref substs) => field_ty_or_layout(
                    TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
                    cx,
                    i,
                ),

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    // Single-variant generator: index into that state's
                    // saved-local types.
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                        substs
                            .as_generator()
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())
                            .unwrap()
                            .nth(i)
                            .unwrap(),
                    ),
                    Variants::Multiple { tag, tag_field, .. } => {
                        // The discriminant field has no source-level type.
                        if i == tag_field {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
                    }
                },

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),

                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
                        }

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { tag, .. } => {
                            assert_eq!(i, 0);
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                    }
                }

                // These should have been normalized/substituted away before
                // any layout was computed for `this`.
                ty::Projection(_)
                | ty::Bound(..)
                | ty::Placeholder(..)
                | ty::Opaque(..)
                | ty::Param(_)
                | ty::Infer(_)
                | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
            }
        }

        match field_ty_or_layout(this, cx, i) {
            TyMaybeWithLayout::Ty(field_ty) => {
                // A field of an already-computed layout must itself have a
                // computable layout; failure here is a compiler bug.
                cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
                    bug!(
                        "failed to get layout for `{}`: {},\n\
                         despite it being a field (#{}) of an existing layout: {:#?}",
                        field_ty,
                        e,
                        i,
                        this
                    )
                })
            }
            TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
        }
    }
2562
    /// If `this` contains a pointer at byte `offset`, returns size/alignment
    /// and aliasing information about the pointee (used e.g. for `noalias`
    /// and `dereferenceable` attributes in codegen); `None` otherwise.
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        let tcx = cx.tcx();
        let param_env = cx.param_env();

        // Function pointers live in the instruction address space; all other
        // pointees are data.
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            // Raw pointer at offset 0: size/align known, but no safety
            // (aliasing) guarantees.
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: cx.data_layout().instruction_address_space,
                })
            }
            // References at offset 0: classify the aliasing guarantees.
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let kind = if tcx.sess.opts.optimize == OptLevel::No {
                    // Use conservative pointer kind if not optimizing. This saves us the
                    // Freeze/Unpin queries, and can save time in the codegen backend (noalias
                    // attributes in LLVM have compile-time cost even in unoptimized builds).
                    PointerKind::SharedMutable
                } else {
                    match mt {
                        hir::Mutability::Not => {
                            // `&T` is only noalias-eligible when `T` has no
                            // interior mutability (is `Freeze`).
                            if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::Frozen
                            } else {
                                PointerKind::SharedMutable
                            }
                        }
                        hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained, that
                            // is not based-on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
                            if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::UniqueBorrowed
                            } else {
                                PointerKind::UniqueBorrowedPinned
                            }
                        }
                    }
                };

                tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            // Aggregates: recurse into whichever field covers `offset`.
            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes).  This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { untagged_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, untagged_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        // Only fields starting at or before `offset` can
                        // contain a pointer at `offset`.
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    let field_info =
                                        field.pointee_info_at(cx, offset - field_start);
                                    field_info
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
2707
2708     fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2709         matches!(this.ty.kind(), ty::Adt(..))
2710     }
2711
2712     fn is_never(this: TyAndLayout<'tcx>) -> bool {
2713         this.ty.kind() == &ty::Never
2714     }
2715
2716     fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2717         matches!(this.ty.kind(), ty::Tuple(..))
2718     }
2719
2720     fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2721         matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2722     }
2723 }
2724
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `fn_abi_of_instance` might be performing.
    //
    // Computes the signature used for ABI/call lowering of this instance:
    // plain `fn` items keep their declared signature (normalized), closures
    // get their environment prepended as an explicit first argument, and
    // generators are lowered to their resume signature
    // `fn(Pin<&mut Self>, ResumeTy) -> GeneratorState<Yield, Return>`.
    fn fn_sig_for_fn_abi(
        &self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> ty::PolyFnSig<'tcx> {
        let ty = self.ty(tcx, param_env);
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VTableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                // Append a fresh late-bound region (`BrEnv`) to the
                // signature's bound vars, for the closure's environment.
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();

                // The environment becomes an explicit first argument of the
                // rebuilt signature.
                let sig = sig.skip_binder();
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    ),
                    bound_vars,
                )
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                // Same `BrEnv` trick as for closures, but here the
                // environment is passed as `Pin<&mut Self>` (generators are
                // resumed while pinned).
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                // Wrap the receiver in `Pin<_>` via the `Pin` lang item.
                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                // Resuming yields `GeneratorState<Yield, Return>`.
                let sig = sig.skip_binder();
                let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    ),
                    bound_vars,
                )
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
2829
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
///
/// * `fn_def_id` - the `DefId` of the function, if one is known. This is used
///   to look up the codegen attrs of a Rust-defined function; for function
///   pointers it is `None`. This is only applicable for Rust-defined
///   functions, and generally isn't needed except for small optimizations where
///   we try to say a function which otherwise might look like it could unwind
///   doesn't actually unwind (such as for intrinsics and such).
///
/// * `abi` - this is the ABI that the function is defined with. This is the
///   primary factor for determining whether a function can unwind or not.
///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
/// defined for each ABI individually, but it always corresponds to some form of
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
///
/// Rust functions are classified whether or not they can unwind based on the
/// active "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
/// only if the final panic mode is panic=abort. In this scenario any code
/// previously compiled assuming that a function can unwind is still correct, it
/// just never happens to actually unwind at runtime.
///
/// This function's answer to whether or not a function can unwind is quite
/// impactful throughout the compiler. This affects things like:
///
/// * Calling a function which can't unwind means codegen simply ignores any
///   associated unwinding cleanup.
/// * Calling a function which can unwind from a function which can't unwind
///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
///   aborts the process.
/// * This affects whether functions have the LLVM `nounwind` attribute, which
///   affects various optimizations and codegen.
///
/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
#[inline]
pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
    if let Some(did) = fn_def_id {
        // Special attribute for functions which can't unwind.
        if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
            return false;
        }

        // With `-C panic=abort`, all non-FFI functions are required to not unwind.
        //
        // Note that this is true regardless ABI specified on the function -- a `extern "C-unwind"`
        // function defined in Rust is also required to abort.
        if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
            return false;
        }

        // With -Z panic-in-drop=abort, drop_in_place never unwinds.
        //
        // This is not part of `codegen_fn_attrs` as it can differ between crates
        // and therefore cannot be computed in core.
        if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
            if Some(did) == tcx.lang_items().drop_in_place_fn() {
                return false;
            }
        }
    }

    // Otherwise if this isn't special then unwinding is generally determined by
    // the ABI of the itself. ABIs like `C` have variants which also
    // specifically allow unwinding (`C-unwind`), but not all platform-specific
    // ABIs have such an option. Otherwise the only other thing here is Rust
    // itself, and those ABIs are determined by the panic strategy configured
    // for this compilation.
    //
    // Unfortunately at this time there's also another caveat. Rust [RFC
    // 2945][rfc] has been accepted and is in the process of being implemented
    // and stabilized. In this interim state we need to deal with historical
    // rustc behavior as well as plan for future rustc behavior.
    //
    // Historically functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
    // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared functions as well as Rust-defined function.
    //
    // To fix this UB rustc would like to change in the future to catch unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function and forcibly abort the process, thereby respecting the
    // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
    // ready to roll out, so determining whether or not the `C` family of ABIs
    // unwinds is conditional not only on their definition but also whether the
    // `#![feature(c_unwind)]` feature gate is active.
    //
    // Note that this means that unlike historical compilers rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
    // prevent some optimization opportunities, however, so we try to scope this
    // change and only assume that `C` unwinds with `panic=unwind` (as opposed
    // to `panic=abort`).
    //
    // Eventually the check against `c_unwind` here will ideally get removed and
    // this'll be a little cleaner as it'll be a straightforward check of the
    // ABI.
    //
    // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
    use SpecAbi::*;
    match abi {
        C { unwind }
        | System { unwind }
        | Cdecl { unwind }
        | Stdcall { unwind }
        | Fastcall { unwind }
        | Vectorcall { unwind }
        | Thiscall { unwind }
        | Aapcs { unwind }
        | Win64 { unwind }
        | SysV64 { unwind } => {
            unwind
                || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
        }
        // ABIs with no `-unwind` variant: never considered to unwind.
        PtxKernel
        | Msp430Interrupt
        | X86Interrupt
        | AmdGpuKernel
        | EfiApi
        | AvrInterrupt
        | AvrNonBlockingInterrupt
        | CCmseNonSecureCall
        | Wasm
        | RustIntrinsic
        | PlatformIntrinsic
        | Unadjusted => false,
        // Rust ABIs unwind iff the crate is compiled with `-Cpanic=unwind`.
        Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
    }
}
2970
2971 #[inline]
2972 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2973     use rustc_target::spec::abi::Abi::*;
2974     match tcx.sess.target.adjust_abi(abi) {
2975         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2976         RustCold => Conv::RustCold,
2977
2978         // It's the ABI's job to select this, not ours.
2979         System { .. } => bug!("system abi should be selected elsewhere"),
2980         EfiApi => bug!("eficall abi should be selected elsewhere"),
2981
2982         Stdcall { .. } => Conv::X86Stdcall,
2983         Fastcall { .. } => Conv::X86Fastcall,
2984         Vectorcall { .. } => Conv::X86VectorCall,
2985         Thiscall { .. } => Conv::X86ThisCall,
2986         C { .. } => Conv::C,
2987         Unadjusted => Conv::C,
2988         Win64 { .. } => Conv::X86_64Win64,
2989         SysV64 { .. } => Conv::X86_64SysV,
2990         Aapcs { .. } => Conv::ArmAapcs,
2991         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2992         PtxKernel => Conv::PtxKernel,
2993         Msp430Interrupt => Conv::Msp430Intr,
2994         X86Interrupt => Conv::X86Intr,
2995         AmdGpuKernel => Conv::AmdGpuKernel,
2996         AvrInterrupt => Conv::AvrInterrupt,
2997         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2998         Wasm => Conv::C,
2999
3000         // These API constants ought to be more specific...
3001         Cdecl { .. } => Conv::C,
3002     }
3003 }
3004
/// Error produced by attempting to compute or adjust a `FnAbi`.
///
/// Both underlying error types convert into this via `From` (see the impls
/// below), so `?` can be used while computing layouts or adjusting for
/// foreign ABIs.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
    /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
    Layout(LayoutError<'tcx>),

    /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
    AdjustForForeignAbi(call::AdjustForForeignAbiError),
}
3014
3015 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3016     fn from(err: LayoutError<'tcx>) -> Self {
3017         Self::Layout(err)
3018     }
3019 }
3020
3021 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3022     fn from(err: call::AdjustForForeignAbiError) -> Self {
3023         Self::AdjustForForeignAbi(err)
3024     }
3025 }
3026
3027 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3028     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3029         match self {
3030             Self::Layout(err) => err.fmt(f),
3031             Self::AdjustForForeignAbi(err) => err.fmt(f),
3032         }
3033     }
3034 }
3035
// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
// just for error handling.
/// Describes which `fn_abi_of_*` request failed, for use in diagnostics
/// (see `FnAbiOfHelpers::handle_fn_abi_err`).
#[derive(Debug)]
pub enum FnAbiRequest<'tcx> {
    /// A `fn_abi_of_fn_ptr` request (indirect calls through `fn` pointers).
    OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
    /// A `fn_abi_of_instance` request (definitions of/direct calls to an `fn`).
    OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
}
3043
/// Trait for contexts that want to be able to compute `FnAbi`s.
/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
    /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
    /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
    type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;

    /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
    /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `&FnAbi` from its
    /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
    /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
    ///
    /// `span` and `fn_abi_request` describe where and for what the failing
    /// request was made, for use in diagnostics.
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
}
3065
3066 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3067 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3068     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3069     ///
3070     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3071     /// instead, where the instance is an `InstanceDef::Virtual`.
3072     #[inline]
3073     fn fn_abi_of_fn_ptr(
3074         &self,
3075         sig: ty::PolyFnSig<'tcx>,
3076         extra_args: &'tcx ty::List<Ty<'tcx>>,
3077     ) -> Self::FnAbiOfResult {
3078         // FIXME(eddyb) get a better `span` here.
3079         let span = self.layout_tcx_at_span();
3080         let tcx = self.tcx().at(span);
3081
3082         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3083             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3084         ))
3085     }
3086
3087     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3088     /// direct calls to an `fn`.
3089     ///
3090     /// NB: that includes virtual calls, which are represented by "direct calls"
3091     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3092     #[inline]
3093     fn fn_abi_of_instance(
3094         &self,
3095         instance: ty::Instance<'tcx>,
3096         extra_args: &'tcx ty::List<Ty<'tcx>>,
3097     ) -> Self::FnAbiOfResult {
3098         // FIXME(eddyb) get a better `span` here.
3099         let span = self.layout_tcx_at_span();
3100         let tcx = self.tcx().at(span);
3101
3102         MaybeResult::from(
3103             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3104                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3105                 // we can get some kind of span even if one wasn't provided.
3106                 // However, we don't do this early in order to avoid calling
3107                 // `def_span` unconditionally (which may have a perf penalty).
3108                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3109                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3110             }),
3111         )
3112     }
3113 }
3114
3115 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
3116
3117 fn fn_abi_of_fn_ptr<'tcx>(
3118     tcx: TyCtxt<'tcx>,
3119     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3120 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3121     let (param_env, (sig, extra_args)) = query.into_parts();
3122
3123     LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3124 }
3125
3126 fn fn_abi_of_instance<'tcx>(
3127     tcx: TyCtxt<'tcx>,
3128     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3129 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3130     let (param_env, (instance, extra_args)) = query.into_parts();
3131
3132     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3133
3134     let caller_location = if instance.def.requires_caller_location(tcx) {
3135         Some(tcx.caller_location_ty())
3136     } else {
3137         None
3138     };
3139
3140     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3141         sig,
3142         extra_args,
3143         caller_location,
3144         Some(instance.def_id()),
3145         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3146     )
3147 }
3148
// Handle safe Rust thin and fat pointers.
/// Adjusts the `ArgAttributes` for a single scalar component of a Rust-ABI
/// argument or return value, based on guarantees the scalar's type provides:
/// `zext`/`noundef` for `bool`, `noundef` for restricted validity ranges, and
/// `nonnull`/`dereferenceable`/`noalias`/`readonly`-style attributes for
/// pointers whose pointee info is known.
///
/// * `scalar` - the scalar component at byte `offset` within `layout`.
/// * `is_return` - whether this is the return value (some attributes, e.g.
///   `ReadOnly`, only apply to parameters).
pub fn adjust_for_rust_scalar<'tcx>(
    cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
    attrs: &mut ArgAttributes,
    scalar: Scalar,
    layout: TyAndLayout<'tcx>,
    offset: Size,
    is_return: bool,
) {
    // Booleans are always a noundef i1 that needs to be zero-extended.
    if scalar.is_bool() {
        attrs.ext(ArgExtension::Zext);
        attrs.set(ArgAttribute::NoUndef);
        return;
    }

    // Scalars which have invalid values cannot be undef.
    if !scalar.is_always_valid(&cx) {
        attrs.set(ArgAttribute::NoUndef);
    }

    // Only pointer types handled below.
    let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };

    // A pointer whose validity range excludes 0 is known non-null.
    if !valid_range.contains(0) {
        attrs.set(ArgAttribute::NonNull);
    }

    if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
        // `pointee.safe` is `Some` only for pointers of known safe kinds
        // (references, `Box`); raw pointers get no further attributes.
        if let Some(kind) = pointee.safe {
            attrs.pointee_align = Some(pointee.align);

            // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
            // for the entire duration of the function as they can be deallocated
            // at any time. Same for shared mutable references. If LLVM had a
            // way to say "dereferenceable on entry" we could use it here.
            attrs.pointee_size = match kind {
                PointerKind::UniqueBorrowed
                | PointerKind::UniqueBorrowedPinned
                | PointerKind::Frozen => pointee.size,
                PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
            };

            // `Box`, `&T`, and `&mut T` cannot be undef.
            // Note that this only applies to the value of the pointer itself;
            // this attribute doesn't make it UB for the pointed-to data to be undef.
            attrs.set(ArgAttribute::NoUndef);

            // The aliasing rules for `Box<T>` are still not decided, but currently we emit
            // `noalias` for it. This can be turned off using an unstable flag.
            // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
            let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);

            // `&mut` pointer parameters never alias other parameters,
            // or mutable global data
            //
            // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
            // and can be marked as both `readonly` and `noalias`, as
            // LLVM's definition of `noalias` is based solely on memory
            // dependencies rather than pointer equality
            //
            // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
            // for UniqueBorrowed arguments, so that the codegen backend can decide whether
            // or not to actually emit the attribute. It can also be controlled with the
            // `-Zmutable-noalias` debugging option.
            let no_alias = match kind {
                PointerKind::SharedMutable
                | PointerKind::UniqueBorrowed
                | PointerKind::UniqueBorrowedPinned => false,
                PointerKind::UniqueOwned => noalias_for_box,
                PointerKind::Frozen => !is_return,
            };
            if no_alias {
                attrs.set(ArgAttribute::NoAlias);
            }

            // Shared frozen references never see writes through them,
            // so parameters (not returns) can additionally be `readonly`.
            if kind == PointerKind::Frozen && !is_return {
                attrs.set(ArgAttribute::ReadOnly);
            }

            if kind == PointerKind::UniqueBorrowed && !is_return {
                attrs.set(ArgAttribute::NoAliasMutRef);
            }
        }
    }
}
3235
3236 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method, into a separate `struct`.
    /// Computes the `FnAbi` for `sig` (plus any `extra_args` from a
    /// C-variadic call site), uncached; the result is arena-allocated.
    ///
    /// * `caller_location` - implicit `Location` argument type appended for
    ///   `#[track_caller]` functions, if any.
    /// * `fn_def_id` - `DefId` of the function definition, if known (used for
    ///   the unwind decision); `None` for `fn` pointers.
    /// * `force_thin_self_ptr` - for virtual calls: pass only the data pointer
    ///   of the `dyn` receiver, not the fat pointer with its vtable.
    fn fn_abi_new_uncached(
        &self,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        fn_def_id: Option<DefId>,
        // FIXME(eddyb) replace this with something typed, like an `enum`.
        force_thin_self_ptr: bool,
    ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
        debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);

        let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);

        let conv = conv_from_spec_abi(self.tcx(), sig.abi);

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            // "rust-call" untuples the last argument: its tuple elements
            // become the tail of the lowered argument list.
            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                            is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                        is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args
        };

        // Targets on which zero-sized arguments cannot simply be ignored
        // for non-Rust ABIs (see the ZST handling in `arg_of` below).
        let target = &self.tcx.sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        use SpecAbi::*;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Computes the `ArgAbi` for a single input (`arg_idx == Some(i)`)
        // or for the return value (`arg_idx == None`).
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
            let is_return = arg_idx.is_none();

            let layout = self.layout_of(ty)?;
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
                make_thin_self_ptr(self, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            Ok(arg)
        };

        // Assemble the `FnAbi`: declared inputs, then untupled/variadic extra
        // args, then the implicit `#[track_caller]` location (if any).
        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None)?,
            args: inputs
                .iter()
                .copied()
                .chain(extra_args.iter().copied())
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect::<Result<_, _>>()?,
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len() as u32,
            conv,
            can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
        };
        self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
        debug!("fn_abi_new_uncached = {:?}", fn_abi);
        Ok(self.tcx.arena.alloc(fn_abi))
    }
3347
3348     fn fn_abi_adjust_for_abi(
3349         &self,
3350         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3351         abi: SpecAbi,
3352     ) -> Result<(), FnAbiError<'tcx>> {
3353         if abi == SpecAbi::Unadjusted {
3354             return Ok(());
3355         }
3356
3357         if abi == SpecAbi::Rust
3358             || abi == SpecAbi::RustCall
3359             || abi == SpecAbi::RustIntrinsic
3360             || abi == SpecAbi::PlatformIntrinsic
3361         {
3362             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3363                 if arg.is_ignore() {
3364                     return;
3365                 }
3366
3367                 match arg.layout.abi {
3368                     Abi::Aggregate { .. } => {}
3369
3370                     // This is a fun case! The gist of what this is doing is
3371                     // that we want callers and callees to always agree on the
3372                     // ABI of how they pass SIMD arguments. If we were to *not*
3373                     // make these arguments indirect then they'd be immediates
3374                     // in LLVM, which means that they'd used whatever the
3375                     // appropriate ABI is for the callee and the caller. That
3376                     // means, for example, if the caller doesn't have AVX
3377                     // enabled but the callee does, then passing an AVX argument
3378                     // across this boundary would cause corrupt data to show up.
3379                     //
3380                     // This problem is fixed by unconditionally passing SIMD
3381                     // arguments through memory between callers and callees
3382                     // which should get them all to agree on ABI regardless of
3383                     // target feature sets. Some more information about this
3384                     // issue can be found in #44367.
3385                     //
3386                     // Note that the platform intrinsic ABI is exempt here as
3387                     // that's how we connect up to LLVM and it's unstable
3388                     // anyway, we control all calls to it in libstd.
3389                     Abi::Vector { .. }
3390                         if abi != SpecAbi::PlatformIntrinsic
3391                             && self.tcx.sess.target.simd_types_indirect =>
3392                     {
3393                         arg.make_indirect();
3394                         return;
3395                     }
3396
3397                     _ => return,
3398                 }
3399
3400                 let size = arg.layout.size;
3401                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3402                     arg.make_indirect();
3403                 } else {
3404                     // We want to pass small aggregates as immediates, but using
3405                     // a LLVM aggregate type for this leads to bad optimizations,
3406                     // so we pick an appropriately sized integer type instead.
3407                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3408                 }
3409             };
3410             fixup(&mut fn_abi.ret);
3411             for arg in fn_abi.args.iter_mut() {
3412                 fixup(arg);
3413             }
3414         } else {
3415             fn_abi.adjust_for_foreign_abi(self, abi)?;
3416         }
3417
3418         Ok(())
3419     }
3420 }
3421
3422 fn make_thin_self_ptr<'tcx>(
3423     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3424     layout: TyAndLayout<'tcx>,
3425 ) -> TyAndLayout<'tcx> {
3426     let tcx = cx.tcx();
3427     let fat_pointer_ty = if layout.is_unsized() {
3428         // unsized `self` is passed as a pointer to `self`
3429         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3430         tcx.mk_mut_ptr(layout.ty)
3431     } else {
3432         match layout.abi {
3433             Abi::ScalarPair(..) => (),
3434             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3435         }
3436
3437         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3438         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3439         // elsewhere in the compiler as a method on a `dyn Trait`.
3440         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3441         // get a built-in pointer type
3442         let mut fat_pointer_layout = layout;
3443         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3444             && !fat_pointer_layout.ty.is_region_ptr()
3445         {
3446             for i in 0..fat_pointer_layout.fields.count() {
3447                 let field_layout = fat_pointer_layout.field(cx, i);
3448
3449                 if !field_layout.is_zst() {
3450                     fat_pointer_layout = field_layout;
3451                     continue 'descend_newtypes;
3452                 }
3453             }
3454
3455             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3456         }
3457
3458         fat_pointer_layout.ty
3459     };
3460
3461     // we now have a type like `*mut RcBox<dyn Trait>`
3462     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3463     // this is understood as a special case elsewhere in the compiler
3464     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3465
3466     TyAndLayout {
3467         ty: fat_pointer_ty,
3468
3469         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3470         // should always work because the type is always `*mut ()`.
3471         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3472     }
3473 }