// compiler/rustc_middle/src/ty/layout.rs
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }

    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128,
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none.
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible.
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
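
// Illustrative example (not from the original source): for an enum such as
// `enum E { A = -1, B = 100 }` with no `#[repr]` hint, `min == -1` and
// `max == 100`, both of which fit in `i8`, so `repr_discr` returns
// `(I8, true)`. With `#[repr(C)]`, the result is at least `c_enum_min_size`
// (usually `I32`) instead.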

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

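// Illustrative sketch (not from the original source): for `&[u8]`, field
// `FAT_PTR_ADDR` is the data pointer and field `FAT_PTR_EXTRA` is the length;
// for `&dyn Trait`, `FAT_PTR_ADDR` is the data pointer and `FAT_PTR_EXTRA`
// is the vtable pointer.
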
/// The maximum supported number of lanes in a SIMD vector.
///
/// This value (`1 << 0xF`, i.e. 2^15 = 32768 lanes) is selected based on
/// backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer,
///   so the largest representable lane count is 2^15.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}
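
// A hedged usage sketch (not part of the original file): callers go through
// the query system rather than calling `layout_of` directly, e.g.
// `let layout = tcx.layout_of(param_env.and(ty))?;` as seen above, which
// caches results per (param_env, ty) pair and enforces the recursion limit.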

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
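
// A minimal illustrative test of `invert_mapping` (not part of the original
// file; the module name and values are hypothetical). Inverting a permutation
// swaps positions and values, and inverting twice round-trips.
#[cfg(test)]
mod invert_mapping_example {
    use super::invert_mapping;

    #[test]
    fn inverts_permutations() {
        // map[x] = y: source field order -> memory order.
        let map = vec![2, 0, 1];
        // invert(map)[y] = x: memory order -> source field order.
        assert_eq!(invert_mapping(&map), vec![1, 2, 0]);
        // Applying the inversion twice recovers the original mapping.
        assert_eq!(invert_mapping(&invert_mapping(&map)), map);
    }
}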

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate over `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
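
    // Worked example (illustrative, not from the original source): for
    // `a = I8` and `b = I32` on a target where `I32` is 4-byte aligned,
    // `b_offset = 4` (the 1 byte of `a` aligned up to 4), `size = 8`
    // (`b_offset + 4` aligned to 4), and `align = 4`.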

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition, we can
            // shuffle the field ordering to try to catch some code making assumptions
            // about layouts we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can
                // use to randomize the field ordering with.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields.
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields.
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix.
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`.
            }
        }
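
        // Illustrative example (not from the original source): for
        // `struct S { a: u8, b: u32, c: u8 }` under `repr(Rust)`, sorting by
        // descending alignment yields the memory order `b, a, c`, i.e.
        // offsets 0, 4, 5 and a total size of 8 after tail padding, versus
        // size 12 in declaration order.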

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then offsets[0] is 5,
        // and memory_index[5] should be 0.
        // Note: if we didn't optimize, inverse_memory_index is the identity mapping,
        // so it's already right as-is.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!(),
                            };
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                && size == pair.size
                            {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I8, false),
                    valid_range: WrappingRange { start: 0, end: 1 },
                },
            )),
            ty::Char => tcx.intern_layout(LayoutS::scalar(
                self,
                Scalar::Initialized {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range_mut().start = 1;
                tcx.intern_layout(LayoutS::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range_mut().start = 1;
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
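
            // Illustrative example (not from the original source): `&str`
            // takes the `ty::Str` metadata case above, so it becomes a scalar
            // pair of a non-null data pointer and a `usize` length, which is
            // why `size_of::<&str>() == 2 * size_of::<usize>()`.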

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
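
            // Illustrative example (not from the original source): `[u16; 3]`
            // has `stride = 2`, `count = 3`, `align = 2`, and `size = 6`;
            // arrays add no padding between elements beyond the element
            // type's own tail padding.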
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr().simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a
                // fixed maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(LayoutS {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }
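
            // Illustrative example (not from the original source): for
            // `#[repr(simd)] struct F32x4([f32; 4]);`, `e_ty` is `f32`,
            // `e_len` is 4, and the result is `Abi::Vector { element: f32,
            // count: 4 }` with `size = 16` and a target-dependent alignment
            // from `dl.vector_align(size)`.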

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants()
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr().pack.is_some() && def.repr().align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did()),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr().align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr().inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Discard valid range information and allow undef.
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(x.to_union(), y.to_union())
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: x.to_union(), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr().pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutS {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
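
                // Illustrative example (not from the original source): for
                // `union U { a: u32, b: f32 }`, both fields are scalars but
                // with different primitives (`Int` vs. `F32`), so `abi` is
                // reset to `Aggregate`; for `union U { a: u32, b: u32 }`, the
                // common `Scalar` ABI (with its valid range widened by
                // `to_union`) is forwarded instead.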

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr().inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did());
                        let last_field = def.variant(v).fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]` attribute
                            // to widen the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.start <= start);
                                valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                let valid_range = scalar.valid_range_mut();
                                assert!(valid_range.end >= end);
                                valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr().hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants()
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche.
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr(),
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(tcx.intern_layout(st))
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields().offset(field_index) + niche.offset;
                            let size = st[i].size();

                            let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi() {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar,
                                                scalar_unit(second.primitive()),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.primitive()),
                                                niche_scalar,
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(LayoutS {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
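
                // Illustrative example (not from the original source):
                // `Option<&T>` niche-fills: `Some(&T)` is the dataful variant,
                // and `None` is encoded in the pointer's null niche, so
                // `size_of::<Option<&T>>() == size_of::<&T>()`.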

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr().discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // Sign-extend the raw representation to be an i128.
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1196
1197                 let mut align = dl.aggregate_align;
1198                 let mut size = Size::ZERO;
1199
1200                 // We're interested in the smallest alignment, so start large.
1201                 let mut start_align = Align::from_bytes(256).unwrap();
1202                 assert_eq!(Integer::for_align(dl, start_align), None);
1203
1204                 // repr(C) on an enum tells us to make a (tag, union) layout,
1205                 // so we need to grow the prefix alignment to be at least
1206                 // the alignment of the union. (This value is used both for
1207                 // determining the alignment of the overall enum, and for
1208                 // determining the alignment of the payload after the tag.)
1209                 let mut prefix_align = min_ity.align(dl).abi;
1210                 if def.repr().c() {
1211                     for fields in &variants {
1212                         for field in fields {
1213                             prefix_align = prefix_align.max(field.align.abi);
1214                         }
1215                     }
1216                 }
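                // For example (illustrative): given `#[repr(C)] enum E { A(u8), B(u64) }`,
                // the prefix alignment grows to that of `u64`, so on a typical
                // 64-bit target the payload starts at offset 8 rather than
                // immediately after the tag.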
1217
1218                 // Create the set of structs that represent each variant.
1219                 let mut layout_variants = variants
1220                     .iter_enumerated()
1221                     .map(|(i, field_layouts)| {
1222                         let mut st = self.univariant_uninterned(
1223                             ty,
1224                             &field_layouts,
1225                             &def.repr(),
1226                             StructKind::Prefixed(min_ity.size(), prefix_align),
1227                         )?;
1228                         st.variants = Variants::Single { index: i };
1229                         // Find the first field we can't move later
1230                         // to make room for a larger discriminant.
1231                         for field in
1232                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1233                         {
1234                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1235                                 start_align = start_align.min(field.align.abi);
1236                                 break;
1237                             }
1238                         }
1239                         size = cmp::max(size, st.size);
1240                         align = align.max(st.align);
1241                         Ok(st)
1242                     })
1243                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1244
1245                 // Align the maximum variant size to the largest alignment.
1246                 size = size.align_to(align.abi);
1247
1248                 if size.bytes() >= dl.obj_size_bound() {
1249                     return Err(LayoutError::SizeOverflow(ty));
1250                 }
1251
1252                 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1253                 if typeck_ity < min_ity {
1254                     // It is a bug if, at this point, layout decided on a greater discriminant
1255                     // size than typeck did (based on the values the discriminant can take on),
1256                     // because this discriminant will be loaded and then stored into a variable
1257                     // of the type calculated by typeck. Consider this (buggy) case: typeck
1258                     // decided on a byte-sized discriminant, but layout thinks we need 16 bits
1259                     // to store all the discriminant values. Then, in codegen, storing this
1260                     // 16-bit discriminant into an 8-bit sized temporary would have to discard
1261                     // some of the bits necessary to represent it (or layout is wrong in
1262                     // thinking it needs 16 bits).
1263                     bug!(
1264                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1265                         min_ity,
1266                         typeck_ity
1267                     );
1268                     // However, after this point it is fine to make the discriminant type
1269                     // larger (as an optimisation) – we'll just truncate the value we load in codegen.
1270                 }
1271
1272                 // Check to see if we should use a different type for the
1273                 // discriminant. We can safely use a type with the same size
1274                 // as the alignment of the first field of each variant.
1275                 // We increase the size of the discriminant to avoid LLVM copying
1276                 // padding when it doesn't need to. Such copying normally causes unaligned
1277                 // loads/stores and excessive memcpy/memset operations. By using a
1278                 // bigger integer size, LLVM can be sure about its contents and
1279                 // won't be so conservative.
1280
1281                 // Use the initial field alignment
1282                 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1283                     min_ity
1284                 } else {
1285                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1286                 };
1287
1288                 // If the alignment is not larger than the chosen discriminant size,
1289                 // don't use the alignment as the final size.
1290                 if ity <= min_ity {
1291                     ity = min_ity;
1292                 } else {
1293                     // Patch up the variants' first few fields.
1294                     let old_ity_size = min_ity.size();
1295                     let new_ity_size = ity.size();
1296                     for variant in &mut layout_variants {
1297                         match variant.fields {
1298                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1299                                 for i in offsets {
1300                                     if *i <= old_ity_size {
1301                                         assert_eq!(*i, old_ity_size);
1302                                         *i = new_ity_size;
1303                                     }
1304                                 }
1305                                 // We might be making the struct larger.
1306                                 if variant.size <= old_ity_size {
1307                                     variant.size = new_ity_size;
1308                                 }
1309                             }
1310                             _ => bug!(),
1311                         }
1312                     }
1313                 }
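                // Illustrative example of the patch-up above: a 1-aligned ZST
                // (e.g. `PhantomData<T>`) placed directly after a 1-byte tag
                // sits at offset 1; if the tag grows to 4 bytes, that offset is
                // bumped to 4 and the variant size grows along with it.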
1314
1315                 let tag_mask = ity.size().unsigned_int_max();
1316                 let tag = Scalar::Initialized {
1317                     value: Int(ity, signed),
1318                     valid_range: WrappingRange {
1319                         start: (min as u128 & tag_mask),
1320                         end: (max as u128 & tag_mask),
1321                     },
1322                 };
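                // Worked example (illustrative): for discriminants -1..=1 stored
                // in a signed 1-byte tag, `tag_mask` is 0xFF and the valid range
                // wraps around: start = 0xFF (-1 truncated), end = 0x01.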
1323                 let mut abi = Abi::Aggregate { sized: true };
1324
1325                 // Without the latter check, aligned enums with custom discriminant values
1326                 // would result in an ICE; see issue #92464 for more info.
1327                 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1328                     abi = Abi::Scalar(tag);
1329                 } else {
1330                     // Try to use a ScalarPair for all tagged enums.
1331                     let mut common_prim = None;
1332                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1333                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1334                             bug!();
1335                         };
1336                         let mut fields =
1337                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1338                         let (field, offset) = match (fields.next(), fields.next()) {
1339                             (None, None) => continue,
1340                             (Some(pair), None) => pair,
1341                             _ => {
1342                                 common_prim = None;
1343                                 break;
1344                             }
1345                         };
1346                         let prim = match field.abi {
1347                             Abi::Scalar(scalar) => scalar.primitive(),
1348                             _ => {
1349                                 common_prim = None;
1350                                 break;
1351                             }
1352                         };
1353                         if let Some(pair) = common_prim {
1354                             // This is pretty conservative. We could go fancier
1355                             // by conflating things like i32 and u32, or even
1356                             // realising that (u8, u8) could just cohabit with
1357                             // u16 or even u32.
1358                             if pair != (prim, offset) {
1359                                 common_prim = None;
1360                                 break;
1361                             }
1362                         } else {
1363                             common_prim = Some((prim, offset));
1364                         }
1365                     }
1366                     if let Some((prim, offset)) = common_prim {
1367                         let pair = self.scalar_pair(tag, scalar_unit(prim));
1368                         let pair_offsets = match pair.fields {
1369                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1370                                 assert_eq!(memory_index, &[0, 1]);
1371                                 offsets
1372                             }
1373                             _ => bug!(),
1374                         };
1375                         if pair_offsets[0] == Size::ZERO
1376                             && pair_offsets[1] == *offset
1377                             && align == pair.align
1378                             && size == pair.size
1379                         {
1380                             // We can use `ScalarPair` only when it matches our
1381                             // already computed layout (including `#[repr(C)]`).
1382                             abi = pair.abi;
1383                         }
1384                     }
1385                 }
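                // For example (illustrative): in `enum E { A(u32), B(u32) }`,
                // every inhabited variant has a single non-ZST `u32` field at
                // the same offset, so the whole enum can be passed around as
                // `ScalarPair(tag, u32)` instead of an opaque aggregate.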
1386
1387                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1388                     abi = Abi::Uninhabited;
1389                 }
1390
1391                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1392
1393                 let layout_variants =
1394                     layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1395
1396                 let tagged_layout = LayoutS {
1397                     variants: Variants::Multiple {
1398                         tag,
1399                         tag_encoding: TagEncoding::Direct,
1400                         tag_field: 0,
1401                         variants: layout_variants,
1402                     },
1403                     fields: FieldsShape::Arbitrary {
1404                         offsets: vec![Size::ZERO],
1405                         memory_index: vec![0],
1406                     },
1407                     largest_niche,
1408                     abi,
1409                     align,
1410                     size,
1411                 };
1412
1413                 let best_layout = match (tagged_layout, niche_filling_layout) {
1414                     (tagged_layout, Some(niche_filling_layout)) => {
1415                         // Pick the smaller layout; otherwise,
1416                         // pick the layout with the larger niche; otherwise,
1417                         // pick tagged as it has simpler codegen.
1418                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1419                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1420                             (layout.size, cmp::Reverse(niche_size))
1421                         })
1422                     }
1423                     (tagged_layout, None) => tagged_layout,
1424                 };
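                // For example (illustrative): for `Option<&T>` the niche-filling
                // candidate encodes `None` as the null pointer and is exactly
                // pointer-sized, so it beats the strictly larger tagged layout.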
1425
1426                 tcx.intern_layout(best_layout)
1427             }
1428
1429             // Types with no meaningful known layout.
1430             ty::Projection(_) | ty::Opaque(..) => {
1431                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1432                 // if that was possible, so there's no reason to try again here.
1433                 return Err(LayoutError::Unknown(ty));
1434             }
1435
1436             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1437                 bug!("Layout::compute: unexpected type `{}`", ty)
1438             }
1439
1440             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1441                 return Err(LayoutError::Unknown(ty));
1442             }
1443         })
1444     }
1445 }
1446
1447 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1448 #[derive(Clone, Debug, PartialEq)]
1449 enum SavedLocalEligibility {
1450     Unassigned,
1451     Assigned(VariantIdx),
1452     // FIXME: Use newtype_index so we aren't wasting bytes
1453     Ineligible(Option<u32>),
1454 }
1455
1456 // When laying out generators, we divide our saved local fields into two
1457 // categories: overlap-eligible and overlap-ineligible.
1458 //
1459 // Those fields which are ineligible for overlap go in a "prefix" at the
1460 // beginning of the layout, and always have space reserved for them.
1461 //
1462 // Overlap-eligible fields are only assigned to one variant, so we lay
1463 // those fields out for each variant and put them right after the
1464 // prefix.
1465 //
1466 // Finally, in the layout details, we point to the fields from the
1467 // variants they are assigned to. It is possible for some fields to be
1468 // included in multiple variants. No field ever "moves around" in the
1469 // layout; its offset is always the same.
1470 //
1471 // Also included in the layout are the upvars and the discriminant.
1472 // These are included as fields on the "outer" layout; they are not part
1473 // of any variant.
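//
// For example (illustrative):
//
//     let g = || {
//         let a = 0u32; // live across both yields: ineligible, promoted to the prefix
//         yield;
//         let b = 0u64; // live across only the second yield: overlap-eligible
//         yield;
//         drop((a, b));
//     };
//
// Here `a` appears in more than one suspend variant, so it is stored once in
// the prefix; `b` is assigned to a single variant and laid out after the
// prefix, where it may overlap other variants' eligible fields.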
1474 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1475     /// Compute the eligibility and assignment of each local.
1476     fn generator_saved_local_eligibility(
1477         &self,
1478         info: &GeneratorLayout<'tcx>,
1479     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1480         use SavedLocalEligibility::*;
1481
1482         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1483             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1484
1485         // The saved locals not eligible for overlap. These will get
1486         // "promoted" to the prefix of our generator.
1487         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1488
1489         // Figure out which of our saved locals are fields in only
1490         // one variant. The rest are deemed ineligible for overlap.
1491         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1492             for local in fields {
1493                 match assignments[*local] {
1494                     Unassigned => {
1495                         assignments[*local] = Assigned(variant_index);
1496                     }
1497                     Assigned(idx) => {
1498                         // We've already seen this local at another suspension
1499                         // point, so it is no longer a candidate.
1500                         trace!(
1501                             "removing local {:?} in >1 variant ({:?}, {:?})",
1502                             local,
1503                             variant_index,
1504                             idx
1505                         );
1506                         ineligible_locals.insert(*local);
1507                         assignments[*local] = Ineligible(None);
1508                     }
1509                     Ineligible(_) => {}
1510                 }
1511             }
1512         }
1513
1514         // Next, check every pair of eligible locals to see if they
1515         // conflict.
1516         for local_a in info.storage_conflicts.rows() {
1517             let conflicts_a = info.storage_conflicts.count(local_a);
1518             if ineligible_locals.contains(local_a) {
1519                 continue;
1520             }
1521
1522             for local_b in info.storage_conflicts.iter(local_a) {
1523                 // local_a and local_b are storage live at the same time, therefore they
1524                 // cannot overlap in the generator layout. The only way to guarantee
1525                 // this is if they are in the same variant, or one is ineligible
1526                 // (which means it is stored in every variant).
1527                 if ineligible_locals.contains(local_b)
1528                     || assignments[local_a] == assignments[local_b]
1529                 {
1530                     continue;
1531                 }
1532
1533                 // If they conflict, we will choose one to make ineligible.
1534                 // This is not always optimal; it's just a greedy heuristic that
1535                 // seems to produce good results most of the time.
1536                 let conflicts_b = info.storage_conflicts.count(local_b);
1537                 let (remove, other) =
1538                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1539                 ineligible_locals.insert(remove);
1540                 assignments[remove] = Ineligible(None);
1541                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1542             }
1543         }
1544
1545         // Count the number of variants in use. If only one is in use, then it is
1546         // impossible to overlap any locals in our layout. In this case it's
1547         // always better to make the remaining locals ineligible, so we can
1548         // lay them out with the other locals in the prefix and eliminate
1549         // unnecessary padding bytes.
1550         {
1551             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1552             for assignment in &assignments {
1553                 if let Assigned(idx) = assignment {
1554                     used_variants.insert(*idx);
1555                 }
1556             }
1557             if used_variants.count() < 2 {
1558                 for assignment in assignments.iter_mut() {
1559                     *assignment = Ineligible(None);
1560                 }
1561                 ineligible_locals.insert_all();
1562             }
1563         }
1564
1565         // Write down the order of our locals that will be promoted to the prefix.
1566         {
1567             for (idx, local) in ineligible_locals.iter().enumerate() {
1568                 assignments[local] = Ineligible(Some(idx as u32));
1569             }
1570         }
1571         debug!("generator saved local assignments: {:?}", assignments);
1572
1573         (ineligible_locals, assignments)
1574     }
1575
1576     /// Compute the full generator layout.
1577     fn generator_layout(
1578         &self,
1579         ty: Ty<'tcx>,
1580         def_id: hir::def_id::DefId,
1581         substs: SubstsRef<'tcx>,
1582     ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1583         use SavedLocalEligibility::*;
1584         let tcx = self.tcx;
1585         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1586
1587         let Some(info) = tcx.generator_layout(def_id) else {
1588             return Err(LayoutError::Unknown(ty));
1589         };
1590         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1591
1592         // Build a prefix layout, including "promoting" all ineligible
1593         // locals as part of the prefix. We compute the layout of all of
1594         // these fields at once to get optimal packing.
1595         let tag_index = substs.as_generator().prefix_tys().count();
1596
1597         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1598         let max_discr = (info.variant_fields.len() - 1) as u128;
1599         let discr_int = Integer::fit_unsigned(max_discr);
1600         let discr_int_ty = discr_int.to_ty(tcx, false);
1601         let tag = Scalar::Initialized {
1602             value: Primitive::Int(discr_int, false),
1603             valid_range: WrappingRange { start: 0, end: max_discr },
1604         };
1605         let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1606         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
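        // For example (illustrative): a generator with one yield point has the
        // three reserved variants (unresumed, returned, poisoned) plus one
        // suspend variant, so `max_discr` is 3 and the tag above is a `u8`
        // with valid range 0..=3.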
1607
1608         let promoted_layouts = ineligible_locals
1609             .iter()
1610             .map(|local| subst_field(info.field_tys[local]))
1611             .map(|ty| tcx.mk_maybe_uninit(ty))
1612             .map(|ty| self.layout_of(ty));
1613         let prefix_layouts = substs
1614             .as_generator()
1615             .prefix_tys()
1616             .map(|ty| self.layout_of(ty))
1617             .chain(iter::once(Ok(tag_layout)))
1618             .chain(promoted_layouts)
1619             .collect::<Result<Vec<_>, _>>()?;
1620         let prefix = self.univariant_uninterned(
1621             ty,
1622             &prefix_layouts,
1623             &ReprOptions::default(),
1624             StructKind::AlwaysSized,
1625         )?;
1626
1627         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1628
1629         // Split the prefix layout into the "outer" fields (upvars and
1630         // discriminant) and the "promoted" fields. Promoted fields will
1631         // get included in each variant that requested them in
1632         // GeneratorLayout.
1633         debug!("prefix = {:#?}", prefix);
1634         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1635             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1636                 let mut inverse_memory_index = invert_mapping(&memory_index);
1637
1638                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1639                 // "outer" and "promoted" fields respectively.
1640                 let b_start = (tag_index + 1) as u32;
1641                 let offsets_b = offsets.split_off(b_start as usize);
1642                 let offsets_a = offsets;
1643
1644                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1645                 // by preserving the order but keeping only one disjoint "half" each.
1646                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1647                 let inverse_memory_index_b: Vec<_> =
1648                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1649                 inverse_memory_index.retain(|&i| i < b_start);
1650                 let inverse_memory_index_a = inverse_memory_index;
1651
1652                 // Since `inverse_memory_index_{a,b}` each only refer to their
1653                 // respective fields, they can be safely inverted.
1654                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1655                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1656
1657                 let outer_fields =
1658                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1659                 (outer_fields, offsets_b, memory_index_b)
1660             }
1661             _ => bug!(),
1662         };
1663
1664         let mut size = prefix.size;
1665         let mut align = prefix.align;
1666         let variants = info
1667             .variant_fields
1668             .iter_enumerated()
1669             .map(|(index, variant_fields)| {
1670                 // Only include overlap-eligible fields when we compute our variant layout.
1671                 let variant_only_tys = variant_fields
1672                     .iter()
1673                     .filter(|local| match assignments[**local] {
1674                         Unassigned => bug!(),
1675                         Assigned(v) if v == index => true,
1676                         Assigned(_) => bug!("assignment does not match variant"),
1677                         Ineligible(_) => false,
1678                     })
1679                     .map(|local| subst_field(info.field_tys[*local]));
1680
1681                 let mut variant = self.univariant_uninterned(
1682                     ty,
1683                     &variant_only_tys
1684                         .map(|ty| self.layout_of(ty))
1685                         .collect::<Result<Vec<_>, _>>()?,
1686                     &ReprOptions::default(),
1687                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1688                 )?;
1689                 variant.variants = Variants::Single { index };
1690
1691                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1692                     bug!();
1693                 };
1694
1695                 // Now, stitch the promoted and variant-only fields back together in
1696                 // the order they are mentioned by our GeneratorLayout.
1697                 // Because we only use some subset (that can differ between variants)
1698                 // of the promoted fields, we can't just pick those elements of the
1699                 // `promoted_memory_index` (as we'd end up with gaps).
1700                 // So instead, we build an "inverse memory_index", as if all of the
1701                 // promoted fields were being used, but leave the elements not in the
1702                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1703                 // obtain a valid (bijective) mapping.
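                // Worked example (illustrative): with two promoted fields and
                // one variant-only field, where this variant uses promoted
                // field #1 (promoted memory index 1) and its own field (variant
                // memory index 0, shifted to 2), the inverse index fills in as
                // `[!0, 0, 1]`; the retain + invert below yield `[0, 1]`.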
1704                 const INVALID_FIELD_IDX: u32 = !0;
1705                 let mut combined_inverse_memory_index =
1706                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1707                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1708                 let combined_offsets = variant_fields
1709                     .iter()
1710                     .enumerate()
1711                     .map(|(i, local)| {
1712                         let (offset, memory_index) = match assignments[*local] {
1713                             Unassigned => bug!(),
1714                             Assigned(_) => {
1715                                 let (offset, memory_index) =
1716                                     offsets_and_memory_index.next().unwrap();
1717                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1718                             }
1719                             Ineligible(field_idx) => {
1720                                 let field_idx = field_idx.unwrap() as usize;
1721                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1722                             }
1723                         };
1724                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1725                         offset
1726                     })
1727                     .collect();
1728
1729                 // Remove the unused slots and invert the mapping to obtain the
1730                 // combined `memory_index` (also see previous comment).
1731                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1732                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1733
1734                 variant.fields = FieldsShape::Arbitrary {
1735                     offsets: combined_offsets,
1736                     memory_index: combined_memory_index,
1737                 };
1738
1739                 size = size.max(variant.size);
1740                 align = align.max(variant.align);
1741                 Ok(tcx.intern_layout(variant))
1742             })
1743             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1744
1745         size = size.align_to(align.abi);
1746
1747         let abi =
1748             if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1749                 Abi::Uninhabited
1750             } else {
1751                 Abi::Aggregate { sized: true }
1752             };
1753
1754         let layout = tcx.intern_layout(LayoutS {
1755             variants: Variants::Multiple {
1756                 tag,
1757                 tag_encoding: TagEncoding::Direct,
1758                 tag_field: tag_index,
1759                 variants,
1760             },
1761             fields: outer_fields,
1762             abi,
1763             largest_niche: prefix.largest_niche,
1764             size,
1765             align,
1766         });
1767         debug!("generator layout ({:?}): {:#?}", ty, layout);
1768         Ok(layout)
1769     }
1770
1771     /// This is invoked by the `layout_of` query to record the final
1772     /// layout of each type.
1773     #[inline(always)]
1774     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1775         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1776         // for dumping later.
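        // The dump emitted at the end of the session looks roughly like this
        // (illustrative, abbreviated):
        //
        //     print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes
        //     print-type-size     discriminant: 4 bytes
        //     print-type-size     variant `Some`: 4 bytes
        //     print-type-size         field `.0`: 4 bytes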
1777         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1778             self.record_layout_for_printing_outlined(layout)
1779         }
1780     }
1781
1782     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1783         // Ignore layouts computed with non-empty parameter environments, and
1784         // non-monomorphic layouts, as the user only wants to see the layouts
1785         // resulting from the final codegen session.
1786         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1787             return;
1788         }
1789
1790         // (delay format until we actually need it)
1791         let record = |kind, packed, opt_discr_size, variants| {
1792             let type_desc = format!("{:?}", layout.ty);
1793             self.tcx.sess.code_stats.record_type_size(
1794                 kind,
1795                 type_desc,
1796                 layout.align.abi,
1797                 layout.size,
1798                 packed,
1799                 opt_discr_size,
1800                 variants,
1801             );
1802         };
1803
1804         let adt_def = match *layout.ty.kind() {
1805             ty::Adt(ref adt_def, _) => {
1806                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1807                 adt_def
1808             }
1809
1810             ty::Closure(..) => {
1811                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1812                 record(DataTypeKind::Closure, false, None, vec![]);
1813                 return;
1814             }
1815
1816             _ => {
1817                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1818                 return;
1819             }
1820         };
1821
1822         let adt_kind = adt_def.adt_kind();
1823         let adt_packed = adt_def.repr().pack.is_some();
1824
1825         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1826             let mut min_size = Size::ZERO;
1827             let field_info: Vec<_> = flds
1828                 .iter()
1829                 .enumerate()
1830                 .map(|(i, &name)| {
1831                     let field_layout = layout.field(self, i);
1832                     let offset = layout.fields.offset(i);
1833                     let field_end = offset + field_layout.size;
1834                     if min_size < field_end {
1835                         min_size = field_end;
1836                     }
1837                     FieldInfo {
1838                         name: name.to_string(),
1839                         offset: offset.bytes(),
1840                         size: field_layout.size.bytes(),
1841                         align: field_layout.align.abi.bytes(),
1842                     }
1843                 })
1844                 .collect();
1845
1846             VariantInfo {
1847                 name: n.map(|n| n.to_string()),
1848                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1849                 align: layout.align.abi.bytes(),
1850                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1851                 fields: field_info,
1852             }
1853         };
1854
1855         match layout.variants {
1856             Variants::Single { index } => {
1857                 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1858                     debug!(
1859                         "print-type-size `{:#?}` variant {}",
1860                         layout,
1861                         adt_def.variant(index).name
1862                     );
1863                     let variant_def = &adt_def.variant(index);
1864                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1865                     record(
1866                         adt_kind.into(),
1867                         adt_packed,
1868                         None,
1869                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1870                     );
1871                 } else {
1872                     // (This case arises for *empty* enums; so give it
1873                     // zero variants.)
1874                     record(adt_kind.into(), adt_packed, None, vec![]);
1875                 }
1876             }
1877
1878             Variants::Multiple { tag, ref tag_encoding, .. } => {
1879                 debug!(
1880                     "print-type-size `{:#?}` adt general variants def {}",
1881                     layout.ty,
1882                     adt_def.variants().len()
1883                 );
1884                 let variant_infos: Vec<_> = adt_def
1885                     .variants()
1886                     .iter_enumerated()
1887                     .map(|(i, variant_def)| {
1888                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1889                         build_variant_info(
1890                             Some(variant_def.name),
1891                             &fields,
1892                             layout.for_variant(self, i),
1893                         )
1894                     })
1895                     .collect();
1896                 record(
1897                     adt_kind.into(),
1898                     adt_packed,
1899                     match tag_encoding {
1900                         TagEncoding::Direct => Some(tag.size(self)),
1901                         _ => None,
1902                     },
1903                     variant_infos,
1904                 );
1905             }
1906         }
1907     }
1908 }
1909
1910 /// Type size "skeleton", i.e., the only information determining a type's size.
1911 /// While this is conservative (aside from constant sizes, only pointers,
1912 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1913 /// enough to statically check common use cases of transmute.
1914 #[derive(Copy, Clone, Debug)]
1915 pub enum SizeSkeleton<'tcx> {
1916     /// Any statically computable Layout.
1917     Known(Size),
1918
1919     /// A potentially-fat pointer.
1920     Pointer {
1921         /// If true, this pointer is never null.
1922         non_zero: bool,
1923         /// The type which determines the unsized metadata, if any,
1924         /// of this pointer. Either a type parameter or a projection
1925         /// depending on one, with regions erased.
1926         tail: Ty<'tcx>,
1927     },
1928 }
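
// For example (illustrative): for a generic `T: Sized`, `&'a T` and
// `Option<&'a T>` both reduce to `SizeSkeleton::Pointer { tail: T }`,
// differing only in `non_zero`, so `same_size` below can accept a transmute
// between them even though neither side has a statically computable layout.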
1929
1930 impl<'tcx> SizeSkeleton<'tcx> {
1931     pub fn compute(
1932         ty: Ty<'tcx>,
1933         tcx: TyCtxt<'tcx>,
1934         param_env: ty::ParamEnv<'tcx>,
1935     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1936         debug_assert!(!ty.has_infer_types_or_consts());
1937
1938         // First try computing a static layout.
1939         let err = match tcx.layout_of(param_env.and(ty)) {
1940             Ok(layout) => {
1941                 return Ok(SizeSkeleton::Known(layout.size));
1942             }
1943             Err(err) => err,
1944         };
1945
1946         match *ty.kind() {
1947             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1948                 let non_zero = !ty.is_unsafe_ptr();
1949                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1950                 match tail.kind() {
1951                     ty::Param(_) | ty::Projection(_) => {
1952                         debug_assert!(tail.has_param_types_or_consts());
1953                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1954                     }
1955                     _ => bug!(
1956                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1957                               tail `{}` is not a type parameter or a projection",
1958                         ty,
1959                         err,
1960                         tail
1961                     ),
1962                 }
1963             }
1964
1965             ty::Adt(def, substs) => {
1966                 // Only newtypes and enums with the nullable pointer optimization.
1967                 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1968                     return Err(err);
1969                 }
1970
1971                 // Get a zero-sized variant or a pointer newtype.
1972                 let zero_or_ptr_variant = |i| {
1973                     let i = VariantIdx::new(i);
1974                     let fields =
1975                         def.variant(i).fields.iter().map(|field| {
1976                             SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1977                         });
1978                     let mut ptr = None;
1979                     for field in fields {
1980                         let field = field?;
1981                         match field {
1982                             SizeSkeleton::Known(size) => {
1983                                 if size.bytes() > 0 {
1984                                     return Err(err);
1985                                 }
1986                             }
1987                             SizeSkeleton::Pointer { .. } => {
1988                                 if ptr.is_some() {
1989                                     return Err(err);
1990                                 }
1991                                 ptr = Some(field);
1992                             }
1993                         }
1994                     }
1995                     Ok(ptr)
1996                 };
1997
1998                 let v0 = zero_or_ptr_variant(0)?;
1999                 // Newtype.
2000                 if def.variants().len() == 1 {
2001                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2002                         return Ok(SizeSkeleton::Pointer {
2003                             non_zero: non_zero
2004                                 || match tcx.layout_scalar_valid_range(def.did()) {
2005                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
2006                                     (Bound::Included(start), Bound::Included(end)) => {
2007                                         0 < start && start < end
2008                                     }
2009                                     _ => false,
2010                                 },
2011                             tail,
2012                         });
2013                     } else {
2014                         return Err(err);
2015                     }
2016                 }
2017
2018                 let v1 = zero_or_ptr_variant(1)?;
2019                 // Nullable pointer enum optimization.
2020                 match (v0, v1) {
2021                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2022                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2023                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2024                     }
2025                     _ => Err(err),
2026                 }
2027             }
2028
2029             ty::Projection(_) | ty::Opaque(..) => {
2030                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2031                 if ty == normalized {
2032                     Err(err)
2033                 } else {
2034                     SizeSkeleton::compute(normalized, tcx, param_env)
2035                 }
2036             }
2037
2038             _ => Err(err),
2039         }
2040     }
2041
2042     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2043         match (self, other) {
2044             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2045             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2046                 a == b
2047             }
2048             _ => false,
2049         }
2050     }
2051 }
2052
2053 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2054     fn tcx(&self) -> TyCtxt<'tcx>;
2055 }
2056
2057 pub trait HasParamEnv<'tcx> {
2058     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2059 }
2060
2061 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2062     #[inline]
2063     fn data_layout(&self) -> &TargetDataLayout {
2064         &self.data_layout
2065     }
2066 }
2067
2068 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2069     fn target_spec(&self) -> &Target {
2070         &self.sess.target
2071     }
2072 }
2073
2074 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2075     #[inline]
2076     fn tcx(&self) -> TyCtxt<'tcx> {
2077         *self
2078     }
2079 }
2080
2081 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2082     #[inline]
2083     fn data_layout(&self) -> &TargetDataLayout {
2084         &self.data_layout
2085     }
2086 }
2087
2088 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2089     fn target_spec(&self) -> &Target {
2090         &self.sess.target
2091     }
2092 }
2093
2094 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2095     #[inline]
2096     fn tcx(&self) -> TyCtxt<'tcx> {
2097         **self
2098     }
2099 }
2100
2101 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2102     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2103         self.param_env
2104     }
2105 }
2106
2107 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2108     fn data_layout(&self) -> &TargetDataLayout {
2109         self.tcx.data_layout()
2110     }
2111 }
2112
2113 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2114     fn target_spec(&self) -> &Target {
2115         self.tcx.target_spec()
2116     }
2117 }
2118
2119 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2120     fn tcx(&self) -> TyCtxt<'tcx> {
2121         self.tcx.tcx()
2122     }
2123 }
2124
2125 pub trait MaybeResult<T> {
2126     type Error;
2127
2128     fn from(x: Result<T, Self::Error>) -> Self;
2129     fn to_result(self) -> Result<T, Self::Error>;
2130 }
2131
2132 impl<T> MaybeResult<T> for T {
2133     type Error = !;
2134
2135     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2136         x
2137     }
2138     fn to_result(self) -> Result<T, Self::Error> {
2139         Ok(self)
2140     }
2141 }
2142
2143 impl<T, E> MaybeResult<T> for Result<T, E> {
2144     type Error = E;
2145
2146     fn from(x: Result<T, Self::Error>) -> Self {
2147         x
2148     }
2149     fn to_result(self) -> Result<T, Self::Error> {
2150         self
2151     }
2152 }
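
// `MaybeResult` lets `layout_of` be generic over whether the caller gets a
// `Result<TyAndLayout, _>` back (propagating errors) or a bare `TyAndLayout`
// (with errors already handled elsewhere). A minimal usage sketch
// (illustrative):
//
//     let ok: Result<u32, String> = MaybeResult::from(Ok(1));
//     assert_eq!(ok.to_result(), Ok(1));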
2153
2154 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2155
2156 /// Trait for contexts that want to be able to compute layouts of types.
2157 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2158 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2159     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2160     /// returned from `layout_of` (see also `handle_layout_err`).
2161     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2162
2163     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2164     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2165     #[inline]
2166     fn layout_tcx_at_span(&self) -> Span {
2167         DUMMY_SP
2168     }
2169
2170     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2171     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2172     ///
2173     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2174     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2175     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2176     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2177     fn handle_layout_err(
2178         &self,
2179         err: LayoutError<'tcx>,
2180         span: Span,
2181         ty: Ty<'tcx>,
2182     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2183 }
2184
2185 /// Blanket extension trait for contexts that can compute layouts of types.
2186 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2187     /// Computes the layout of a type. Note that this implicitly
2188     /// executes in "reveal all" mode, and will normalize the input type.
2189     #[inline]
2190     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2191         self.spanned_layout_of(ty, DUMMY_SP)
2192     }
2193
2194     /// Computes the layout of a type, at `span`. Note that this implicitly
2195     /// executes in "reveal all" mode, and will normalize the input type.
2196     // FIXME(eddyb) avoid passing information like this, and instead add more
2197     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2198     #[inline]
2199     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2200         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2201         let tcx = self.tcx().at(span);
2202
2203         MaybeResult::from(
2204             tcx.layout_of(self.param_env().and(ty))
2205                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2206         )
2207     }
2208 }
2209
2210 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2211
2212 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2213     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2214
2215     #[inline]
2216     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2217         err
2218     }
2219 }
2220
2221 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2222     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2223
2224     #[inline]
2225     fn layout_tcx_at_span(&self) -> Span {
2226         self.tcx.span
2227     }
2228
2229     #[inline]
2230     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2231         err
2232     }
2233 }
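
// A minimal sketch (hypothetical context, not part of this file) of a
// codegen-style `LayoutOfHelpers` impl that returns `TyAndLayout` directly,
// turning layout errors into fatal errors as `MaybeResult` permits:
//
//     impl<'tcx> LayoutOfHelpers<'tcx> for MyCodegenCx<'tcx> {
//         type LayoutOfResult = TyAndLayout<'tcx>;
//
//         fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
//             self.tcx().sess.span_fatal(span, &format!("layout error for `{}`: {}", ty, err))
//         }
//     }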
2234
2235 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2236 where
2237     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2238 {
2239     fn ty_and_layout_for_variant(
2240         this: TyAndLayout<'tcx>,
2241         cx: &C,
2242         variant_index: VariantIdx,
2243     ) -> TyAndLayout<'tcx> {
2244         let layout = match this.variants {
2245             Variants::Single { index }
2246                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2247                 if index == variant_index &&
2248                 // Don't confuse variants of uninhabited enums with the enum itself.
2249                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2250                 this.fields != FieldsShape::Primitive =>
2251             {
2252                 this.layout
2253             }
2254
2255             Variants::Single { index } => {
2256                 let tcx = cx.tcx();
2257                 let param_env = cx.param_env();
2258
2259                 // Deny calling for_variant more than once for non-Single enums.
2260                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2261                     assert_eq!(original_layout.variants, Variants::Single { index });
2262                 }
2263
2264                 let fields = match this.ty.kind() {
2265                     ty::Adt(def, _) if def.variants().is_empty() =>
2266                         bug!("for_variant called on zero-variant enum"),
2267                     ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2268                     _ => bug!(),
2269                 };
2270                 tcx.intern_layout(LayoutS {
2271                     variants: Variants::Single { index: variant_index },
2272                     fields: match NonZeroUsize::new(fields) {
2273                         Some(fields) => FieldsShape::Union(fields),
2274                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2275                     },
2276                     abi: Abi::Uninhabited,
2277                     largest_niche: None,
2278                     align: tcx.data_layout.i8_align,
2279                     size: Size::ZERO,
2280                 })
2281             }
2282
2283             Variants::Multiple { ref variants, .. } => variants[variant_index],
2284         };
2285
2286         assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2287
2288         TyAndLayout { ty: this.ty, layout }
2289     }
2290
2291     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2292         enum TyMaybeWithLayout<'tcx> {
2293             Ty(Ty<'tcx>),
2294             TyAndLayout(TyAndLayout<'tcx>),
2295         }
2296
2297         fn field_ty_or_layout<'tcx>(
2298             this: TyAndLayout<'tcx>,
2299             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2300             i: usize,
2301         ) -> TyMaybeWithLayout<'tcx> {
2302             let tcx = cx.tcx();
2303             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2304                 TyAndLayout {
2305                     layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2306                     ty: tag.primitive().to_ty(tcx),
2307                 }
2308             };
2309
2310             match *this.ty.kind() {
2311                 ty::Bool
2312                 | ty::Char
2313                 | ty::Int(_)
2314                 | ty::Uint(_)
2315                 | ty::Float(_)
2316                 | ty::FnPtr(_)
2317                 | ty::Never
2318                 | ty::FnDef(..)
2319                 | ty::GeneratorWitness(..)
2320                 | ty::Foreign(..)
2321                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2322
2323                 // Potentially-fat pointers.
2324                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2325                     assert!(i < this.fields.count());
2326
2327                     // Reuse the fat `*T` type as its own thin pointer data field.
2328                     // This provides information about, e.g., DST struct pointees
2329                     // (which may have no non-DST form), and will work as long
2330                     // as the `Abi` or `FieldsShape` is checked by users.
2331                     if i == 0 {
2332                         let nil = tcx.mk_unit();
2333                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2334                             tcx.mk_mut_ptr(nil)
2335                         } else {
2336                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2337                         };
2338
2339                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2340                         // the `Result` should always work because the type is
2341                         // always either `*mut ()` or `&'static mut ()`.
2342                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2343                             ty: this.ty,
2344                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2345                         });
2346                     }
2347
2348                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2349                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2350                         ty::Dynamic(_, _) => {
2351                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2352                                 tcx.lifetimes.re_static,
2353                                 tcx.mk_array(tcx.types.usize, 3),
2354                             ))
2355                             /* FIXME: use actual fn pointers
2356                             Warning: naively computing the number of entries in the
2357                             vtable by counting the methods on the trait + methods on
2358                             all parent traits does not work, because some methods can
2359                             be not object safe and thus excluded from the vtable.
2360                             Increase this counter if you tried to implement this but
2361                             failed to do it without duplicating a lot of code from
2362                             other places in the compiler: 2
2363                             tcx.mk_tup(&[
2364                                 tcx.mk_array(tcx.types.usize, 3),
2365                                 tcx.mk_array(Option<fn()>),
2366                             ])
2367                             */
2368                         }
2369                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2370                     }
2371                 }
2372
2373                 // Arrays and slices.
2374                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2375                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2376
2377                 // Tuples, generators and closures.
2378                 ty::Closure(_, ref substs) => field_ty_or_layout(
2379                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2380                     cx,
2381                     i,
2382                 ),
2383
2384                 ty::Generator(def_id, ref substs, _) => match this.variants {
2385                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2386                         substs
2387                             .as_generator()
2388                             .state_tys(def_id, tcx)
2389                             .nth(index.as_usize())
2390                             .unwrap()
2391                             .nth(i)
2392                             .unwrap(),
2393                     ),
2394                     Variants::Multiple { tag, tag_field, .. } => {
2395                         if i == tag_field {
2396                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2397                         }
2398                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2399                     }
2400                 },
2401
2402                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2403
2404                 // ADTs.
2405                 ty::Adt(def, substs) => {
2406                     match this.variants {
2407                         Variants::Single { index } => {
2408                             TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2409                         }
2410
2411                         // Discriminant field for enums (where applicable).
2412                         Variants::Multiple { tag, .. } => {
2413                             assert_eq!(i, 0);
2414                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2415                         }
2416                     }
2417                 }
2418
2419                 ty::Projection(_)
2420                 | ty::Bound(..)
2421                 | ty::Placeholder(..)
2422                 | ty::Opaque(..)
2423                 | ty::Param(_)
2424                 | ty::Infer(_)
2425                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2426             }
2427         }
2428
2429         match field_ty_or_layout(this, cx, i) {
2430             TyMaybeWithLayout::Ty(field_ty) => {
2431                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2432                     bug!(
2433                         "failed to get layout for `{}`: {},\n\
2434                          despite it being a field (#{}) of an existing layout: {:#?}",
2435                         field_ty,
2436                         e,
2437                         i,
2438                         this
2439                     )
2440                 })
2441             }
2442             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2443         }
2444     }
2445
2446     fn ty_and_layout_pointee_info_at(
2447         this: TyAndLayout<'tcx>,
2448         cx: &C,
2449         offset: Size,
2450     ) -> Option<PointeeInfo> {
2451         let tcx = cx.tcx();
2452         let param_env = cx.param_env();
2453
2454         let addr_space_of_ty = |ty: Ty<'tcx>| {
2455             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2456         };
2457
2458         let pointee_info = match *this.ty.kind() {
2459             ty::RawPtr(mt) if offset.bytes() == 0 => {
2460                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2461                     size: layout.size,
2462                     align: layout.align.abi,
2463                     safe: None,
2464                     address_space: addr_space_of_ty(mt.ty),
2465                 })
2466             }
2467             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2468                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2469                     size: layout.size,
2470                     align: layout.align.abi,
2471                     safe: None,
2472                     address_space: cx.data_layout().instruction_address_space,
2473                 })
2474             }
2475             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2476                 let address_space = addr_space_of_ty(ty);
2477                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2478                     // Use conservative pointer kind if not optimizing. This saves us the
2479                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2480                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2481                     PointerKind::Shared
2482                 } else {
2483                     match mt {
2484                         hir::Mutability::Not => {
2485                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2486                                 PointerKind::Frozen
2487                             } else {
2488                                 PointerKind::Shared
2489                             }
2490                         }
2491                         hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained that
                            // is not based on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
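                            //
                            // A minimal sketch of the hazard (illustrative user code,
                            // not from this crate):
                            //
                            //     use std::marker::PhantomPinned;
                            //     struct SelfRef {
                            //         data: u8,
                            //         ptr: *const u8,      // points at `self.data`
                            //         _pin: PhantomPinned, // opts out of `Unpin`
                            //     }
                            //
                            // A `&mut SelfRef` and the escaped `ptr` address the same
                            // memory, so marking the reference `noalias` would be
                            // unsound.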
2496                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2497                                 PointerKind::UniqueBorrowed
2498                             } else {
2499                                 PointerKind::Shared
2500                             }
2501                         }
2502                     }
2503                 };
2504
2505                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2506                     size: layout.size,
2507                     align: layout.align.abi,
2508                     safe: Some(kind),
2509                     address_space,
2510                 })
2511             }
2512
2513             _ => {
2514                 let mut data_variant = match this.variants {
2515                     // Within the discriminant field, only the niche itself is
2516                     // always initialized, so we only check for a pointer at its
2517                     // offset.
2518                     //
2519                     // If the niche is a pointer, it's either valid (according
2520                     // to its type), or null (which the niche field's scalar
2521                     // validity range encodes).  This allows using
2522                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2523                     // this will continue to work as long as we don't start
2524                     // using more niches than just null (e.g., the first page of
2525                     // the address space, or unaligned pointers).
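                    //
                    // Concretely (an illustrative example): `Option<&T>` is a
                    // single pointer whose `None` variant is the all-zero bit
                    // pattern, so the niche's scalar validity range already
                    // expresses "either a valid `&T` or null", which is exactly
                    // the shape `dereferenceable_or_null` needs.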
2526                     Variants::Multiple {
2527                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2528                         tag_field,
2529                         ..
2530                     } if this.fields.offset(tag_field) == offset => {
2531                         Some(this.for_variant(cx, dataful_variant))
2532                     }
2533                     _ => Some(this),
2534                 };
2535
2536                 if let Some(variant) = data_variant {
2537                     // We're not interested in any unions.
2538                     if let FieldsShape::Union(_) = variant.fields {
2539                         data_variant = None;
2540                     }
2541                 }
2542
2543                 let mut result = None;
2544
2545                 if let Some(variant) = data_variant {
2546                     let ptr_end = offset + Pointer.size(cx);
2547                     for i in 0..variant.fields.count() {
2548                         let field_start = variant.fields.offset(i);
2549                         if field_start <= offset {
2550                             let field = variant.field(cx, i);
2551                             result = field.to_result().ok().and_then(|field| {
2552                                 if ptr_end <= field_start + field.size {
                                    // We found the right field; look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
2557                                 } else {
2558                                     None
2559                                 }
2560                             });
2561                             if result.is_some() {
2562                                 break;
2563                             }
2564                         }
2565                     }
2566                 }
2567
2568                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2569                 if let Some(ref mut pointee) = result {
2570                     if let ty::Adt(def, _) = this.ty.kind() {
2571                         if def.is_box() && offset.bytes() == 0 {
2572                             pointee.safe = Some(PointerKind::UniqueOwned);
2573                         }
2574                     }
2575                 }
2576
2577                 result
2578             }
2579         };
2580
2581         debug!(
2582             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2583             offset,
2584             this.ty.kind(),
2585             pointee_info
2586         );
2587
2588         pointee_info
2589     }
2590 }
2591
2592 impl<'tcx> ty::Instance<'tcx> {
2593     // NOTE(eddyb) this is private to avoid using it from outside of
2594     // `fn_abi_of_instance` - any other uses are either too high-level
2595     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2596     // or should go through `FnAbi` instead, to avoid losing any
2597     // adjustments `fn_abi_of_instance` might be performing.
2598     fn fn_sig_for_fn_abi(
2599         &self,
2600         tcx: TyCtxt<'tcx>,
2601         param_env: ty::ParamEnv<'tcx>,
2602     ) -> ty::PolyFnSig<'tcx> {
2603         let ty = self.ty(tcx, param_env);
2604         match *ty.kind() {
2605             ty::FnDef(..) => {
2606                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2607                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2608                 // (i.e. due to being inside a projection that got normalized, see
2609                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2610                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2611                 let mut sig = match *ty.kind() {
2612                     ty::FnDef(def_id, substs) => tcx
2613                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2614                         .subst(tcx, substs),
2615                     _ => unreachable!(),
2616                 };
2617
2618                 if let ty::InstanceDef::VtableShim(..) = self.def {
2619                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2620                     sig = sig.map_bound(|mut sig| {
2621                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2622                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2623                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2624                         sig
2625                     });
2626                 }
2627                 sig
2628             }
2629             ty::Closure(def_id, substs) => {
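                // Schematically (a restatement of the code below, not new
                // behavior): the closure's "rust-call" signature
                // `fn((args...)) -> R` gains the environment as an extra first
                // input, becoming `fn(env, (args...)) -> R`, where `env` is
                // `&self`, `&mut self`, or `self` depending on the closure
                // kind, with the borrow using a fresh late-bound `BrEnv` region.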
2630                 let sig = substs.as_closure().sig();
2631
2632                 let bound_vars = tcx.mk_bound_variable_kinds(
2633                     sig.bound_vars()
2634                         .iter()
2635                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2636                 );
2637                 let br = ty::BoundRegion {
2638                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2639                     kind: ty::BoundRegionKind::BrEnv,
2640                 };
2641                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2642                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2643
2644                 let sig = sig.skip_binder();
2645                 ty::Binder::bind_with_vars(
2646                     tcx.mk_fn_sig(
2647                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2648                         sig.output(),
2649                         sig.c_variadic,
2650                         sig.unsafety,
2651                         sig.abi,
2652                     ),
2653                     bound_vars,
2654                 )
2655             }
2656             ty::Generator(_, substs, _) => {
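                // Schematically (a restatement of the code below, not new
                // behavior): a generator's resume function has the signature
                // `fn(Pin<&mut Self>, ResumeTy) -> GeneratorState<YieldTy, ReturnTy>`,
                // with the `&mut Self` borrow using a fresh late-bound `BrEnv`
                // region.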
2657                 let sig = substs.as_generator().poly_sig();
2658
2659                 let bound_vars = tcx.mk_bound_variable_kinds(
2660                     sig.bound_vars()
2661                         .iter()
2662                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2663                 );
2664                 let br = ty::BoundRegion {
2665                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2666                     kind: ty::BoundRegionKind::BrEnv,
2667                 };
2668                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2669                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2670
2671                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2672                 let pin_adt_ref = tcx.adt_def(pin_did);
2673                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2674                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2675
2676                 let sig = sig.skip_binder();
2677                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2678                 let state_adt_ref = tcx.adt_def(state_did);
2679                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2680                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2681                 ty::Binder::bind_with_vars(
2682                     tcx.mk_fn_sig(
2683                         [env_ty, sig.resume_ty].iter(),
2684                         &ret_ty,
2685                         false,
2686                         hir::Unsafety::Normal,
2687                         rustc_target::spec::abi::Abi::Rust,
2688                     ),
2689                     bound_vars,
2690                 )
2691             }
2692             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2693         }
2694     }
2695 }
2696
2697 /// Calculates whether a function's ABI can unwind or not.
2698 ///
2699 /// This takes two primary parameters:
2700 ///
2701 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2702 ///   codegen attrs for a defined function. For function pointers this set of
2703 ///   flags is the empty set. This is only applicable for Rust-defined
///   functions, and generally isn't needed except for small optimizations where
///   we try to say that a function which otherwise might look like it could
///   unwind doesn't actually unwind (such as for intrinsics).
2707 ///
2708 /// * `abi` - this is the ABI that the function is defined with. This is the
2709 ///   primary factor for determining whether a function can unwind or not.
2710 ///
2711 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
2715 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2716 /// defined for each ABI individually, but it always corresponds to some form of
2717 /// stack-based unwinding (the exact mechanism of which varies
2718 /// platform-by-platform).
2719 ///
/// Rust functions are classified according to whether or not they can unwind,
/// based on the active "panic strategy". In other words, Rust functions are
/// considered to unwind in `-Cpanic=unwind` mode and not in `-Cpanic=abort` mode.
2723 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2724 /// only if the final panic mode is panic=abort. In this scenario any code
2725 /// previously compiled assuming that a function can unwind is still correct, it
2726 /// just never happens to actually unwind at runtime.
2727 ///
2728 /// This function's answer to whether or not a function can unwind is quite
2729 /// impactful throughout the compiler. This affects things like:
2730 ///
2731 /// * Calling a function which can't unwind means codegen simply ignores any
2732 ///   associated unwinding cleanup.
2733 /// * Calling a function which can unwind from a function which can't unwind
2734 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2735 ///   aborts the process.
2736 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2737 ///   affects various optimizations and codegen.
2738 ///
2739 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2740 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2741 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2742 /// might (from a foreign exception or similar).
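///
/// For illustration, the distinction is driven purely by the ABI string in user
/// code (a sketch; the `*-unwind` ABI strings assume the unstable `c_unwind`
/// feature):
///
/// ```ignore (illustrative)
/// extern "C" fn f() {}           // unwinding permission depends on the rules below
/// extern "C-unwind" fn g() {}    // unwinding is always part of the ABI contract
/// ```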
2743 #[inline]
2744 pub fn fn_can_unwind<'tcx>(
2745     tcx: TyCtxt<'tcx>,
2746     codegen_fn_attr_flags: CodegenFnAttrFlags,
2747     abi: SpecAbi,
2748 ) -> bool {
2749     // Special attribute for functions which can't unwind.
2750     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2751         return false;
2752     }
2753
    // Otherwise, if this isn't special, then unwinding is generally determined
    // by the ABI of the function itself. ABIs like `C` have variants which also
2756     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2757     // ABIs have such an option. Otherwise the only other thing here is Rust
2758     // itself, and those ABIs are determined by the panic strategy configured
2759     // for this compilation.
2760     //
2761     // Unfortunately at this time there's also another caveat. Rust [RFC
2762     // 2945][rfc] has been accepted and is in the process of being implemented
2763     // and stabilized. In this interim state we need to deal with historical
2764     // rustc behavior as well as plan for future rustc behavior.
2765     //
    // Historically, functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`, regardless of whether `panic=unwind` was
    // enabled or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior holds both for
    // externally declared functions and for Rust-defined functions.
2771     //
    // To fix this UB, rustc would like to change in the future so that unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function are caught and the process is forcibly aborted, thereby
    // respecting the `nounwind` attribute emitted for `extern "C"`. This
    // behavior change isn't ready to roll out, so determining whether or not
    // the `C` family of ABIs unwinds is conditional not only on their
    // definition but also on whether the `#![feature(c_unwind)]` feature gate
    // is active.
2779     //
    // Note that this means that, unlike historical compilers, rustc now
    // unconditionally thinks by default that the `C` ABI may unwind. This will
2782     // prevent some optimization opportunities, however, so we try to scope this
2783     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2784     // to `panic=abort`).
2785     //
2786     // Eventually the check against `c_unwind` here will ideally get removed and
2787     // this'll be a little cleaner as it'll be a straightforward check of the
2788     // ABI.
2789     //
2790     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
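    //
    // As an illustrative summary of the `match` below, for the `C` family of
    // ABIs:
    //
    //   ABI string    `c_unwind` gate   -Cpanic=unwind   -Cpanic=abort
    //   "C"           disabled          can unwind       nounwind
    //   "C"           enabled           nounwind         nounwind
    //   "C-unwind"    (either)          can unwind       can unwind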
2791     use SpecAbi::*;
2792     match abi {
2793         C { unwind }
2794         | System { unwind }
2795         | Cdecl { unwind }
2796         | Stdcall { unwind }
2797         | Fastcall { unwind }
2798         | Vectorcall { unwind }
2799         | Thiscall { unwind }
2800         | Aapcs { unwind }
2801         | Win64 { unwind }
2802         | SysV64 { unwind } => {
2803             unwind
2804                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2805         }
2806         PtxKernel
2807         | Msp430Interrupt
2808         | X86Interrupt
2809         | AmdGpuKernel
2810         | EfiApi
2811         | AvrInterrupt
2812         | AvrNonBlockingInterrupt
2813         | CCmseNonSecureCall
2814         | Wasm
2815         | RustIntrinsic
2816         | PlatformIntrinsic
2817         | Unadjusted => false,
2818         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2819     }
2820 }
2821
2822 #[inline]
2823 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2824     use rustc_target::spec::abi::Abi::*;
2825     match tcx.sess.target.adjust_abi(abi) {
2826         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2827
2828         // It's the ABI's job to select this, not ours.
2829         System { .. } => bug!("system abi should be selected elsewhere"),
2830         EfiApi => bug!("eficall abi should be selected elsewhere"),
2831
2832         Stdcall { .. } => Conv::X86Stdcall,
2833         Fastcall { .. } => Conv::X86Fastcall,
2834         Vectorcall { .. } => Conv::X86VectorCall,
2835         Thiscall { .. } => Conv::X86ThisCall,
2836         C { .. } => Conv::C,
2837         Unadjusted => Conv::C,
2838         Win64 { .. } => Conv::X86_64Win64,
2839         SysV64 { .. } => Conv::X86_64SysV,
2840         Aapcs { .. } => Conv::ArmAapcs,
2841         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2842         PtxKernel => Conv::PtxKernel,
2843         Msp430Interrupt => Conv::Msp430Intr,
2844         X86Interrupt => Conv::X86Intr,
2845         AmdGpuKernel => Conv::AmdGpuKernel,
2846         AvrInterrupt => Conv::AvrInterrupt,
2847         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2848         Wasm => Conv::C,
2849
        // These ABI constants ought to be more specific...
2851         Cdecl { .. } => Conv::C,
2852     }
2853 }
2854
2855 /// Error produced by attempting to compute or adjust a `FnAbi`.
2856 #[derive(Copy, Clone, Debug, HashStable)]
2857 pub enum FnAbiError<'tcx> {
2858     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2859     Layout(LayoutError<'tcx>),
2860
2861     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2862     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2863 }
2864
2865 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2866     fn from(err: LayoutError<'tcx>) -> Self {
2867         Self::Layout(err)
2868     }
2869 }
2870
2871 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2872     fn from(err: call::AdjustForForeignAbiError) -> Self {
2873         Self::AdjustForForeignAbi(err)
2874     }
2875 }
2876
2877 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2878     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2879         match self {
2880             Self::Layout(err) => err.fmt(f),
2881             Self::AdjustForForeignAbi(err) => err.fmt(f),
2882         }
2883     }
2884 }
2885
// FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
2887 // just for error handling.
2888 #[derive(Debug)]
2889 pub enum FnAbiRequest<'tcx> {
2890     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2891     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2892 }
2893
2894 /// Trait for contexts that want to be able to compute `FnAbi`s.
2895 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2896 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2897     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2898     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2899     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2900
2901     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2902     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2903     ///
2904     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2905     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2906     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2907     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2908     fn handle_fn_abi_err(
2909         &self,
2910         err: FnAbiError<'tcx>,
2911         span: Span,
2912         fn_abi_request: FnAbiRequest<'tcx>,
2913     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2914 }
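
// A minimal sketch of an implementation (with a hypothetical `MyCodegenCx`
// type; the real implementors live in the codegen crates): a context that
// wants plain `&FnAbi` results can pick that as its `FnAbiOfResult`, whose
// `MaybeResult` error type is `!`, and report any `FnAbiError` as a bug:
//
//     impl<'tcx> FnAbiOfHelpers<'tcx> for MyCodegenCx<'tcx> {
//         type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
//
//         fn handle_fn_abi_err(
//             &self,
//             err: FnAbiError<'tcx>,
//             span: Span,
//             fn_abi_request: FnAbiRequest<'tcx>,
//         ) -> ! {
//             span_bug!(span, "{:?}: {:?}", err, fn_abi_request)
//         }
//     }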
2915
2916 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2917 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2918     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2919     ///
2920     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2921     /// instead, where the instance is an `InstanceDef::Virtual`.
2922     #[inline]
2923     fn fn_abi_of_fn_ptr(
2924         &self,
2925         sig: ty::PolyFnSig<'tcx>,
2926         extra_args: &'tcx ty::List<Ty<'tcx>>,
2927     ) -> Self::FnAbiOfResult {
2928         // FIXME(eddyb) get a better `span` here.
2929         let span = self.layout_tcx_at_span();
2930         let tcx = self.tcx().at(span);
2931
2932         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2933             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2934         ))
2935     }
2936
2937     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2938     /// direct calls to an `fn`.
2939     ///
2940     /// NB: that includes virtual calls, which are represented by "direct calls"
2941     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2942     #[inline]
2943     fn fn_abi_of_instance(
2944         &self,
2945         instance: ty::Instance<'tcx>,
2946         extra_args: &'tcx ty::List<Ty<'tcx>>,
2947     ) -> Self::FnAbiOfResult {
2948         // FIXME(eddyb) get a better `span` here.
2949         let span = self.layout_tcx_at_span();
2950         let tcx = self.tcx().at(span);
2951
2952         MaybeResult::from(
2953             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2954                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2955                 // we can get some kind of span even if one wasn't provided.
2956                 // However, we don't do this early in order to avoid calling
2957                 // `def_span` unconditionally (which may have a perf penalty).
2958                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2959                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2960             }),
2961         )
2962     }
2963 }
2964
2965 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
2966
2967 fn fn_abi_of_fn_ptr<'tcx>(
2968     tcx: TyCtxt<'tcx>,
2969     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2970 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2971     let (param_env, (sig, extra_args)) = query.into_parts();
2972
2973     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2974         sig,
2975         extra_args,
2976         None,
2977         CodegenFnAttrFlags::empty(),
2978         false,
2979     )
2980 }
2981
2982 fn fn_abi_of_instance<'tcx>(
2983     tcx: TyCtxt<'tcx>,
2984     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2985 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2986     let (param_env, (instance, extra_args)) = query.into_parts();
2987
2988     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
2989
2990     let caller_location = if instance.def.requires_caller_location(tcx) {
2991         Some(tcx.caller_location_ty())
2992     } else {
2993         None
2994     };
2995
2996     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2997
2998     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2999         sig,
3000         extra_args,
3001         caller_location,
3002         attrs,
3003         matches!(instance.def, ty::InstanceDef::Virtual(..)),
3004     )
3005 }
3006
3007 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3008     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3009     // arguments of this method, into a separate `struct`.
3010     fn fn_abi_new_uncached(
3011         &self,
3012         sig: ty::PolyFnSig<'tcx>,
3013         extra_args: &[Ty<'tcx>],
3014         caller_location: Option<Ty<'tcx>>,
3015         codegen_fn_attr_flags: CodegenFnAttrFlags,
3016         // FIXME(eddyb) replace this with something typed, like an `enum`.
3017         force_thin_self_ptr: bool,
3018     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3019         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3020
3021         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3022
3023         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3024
3025         let mut inputs = sig.inputs();
3026         let extra_args = if sig.abi == RustCall {
3027             assert!(!sig.c_variadic && extra_args.is_empty());
3028
3029             if let Some(input) = sig.inputs().last() {
3030                 if let ty::Tuple(tupled_arguments) = input.kind() {
3031                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3032                     tupled_arguments
3033                 } else {
3034                     bug!(
3035                         "argument to function with \"rust-call\" ABI \
3036                             is not a tuple"
3037                     );
3038                 }
3039             } else {
                bug!(
                    "function with \"rust-call\" ABI \
                        has no arguments"
                );
3044             }
3045         } else {
3046             assert!(sig.c_variadic || extra_args.is_empty());
3047             extra_args
3048         };
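
        // Illustrative example (not compiler code): a "rust-call" invocation
        // such as
        //
        //     <F as FnOnce<(u8, u16)>>::call_once(f, (1u8, 2u16))
        //
        // has the signature `fn(F, (u8, u16))`; the untupling above makes the
        // argument list that the `FnAbi` sees `[F, u8, u16]`.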
3049
3050         let target = &self.tcx.sess.target;
3051         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3052         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3053         let linux_s390x_gnu_like =
3054             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3055         let linux_sparc64_gnu_like =
3056             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3057         let linux_powerpc_gnu_like =
3058             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3059         use SpecAbi::*;
3060         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3061
3062         // Handle safe Rust thin and fat pointers.
3063         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3064                                       scalar: Scalar,
3065                                       layout: TyAndLayout<'tcx>,
3066                                       offset: Size,
3067                                       is_return: bool| {
3068             // Booleans are always a noundef i1 that needs to be zero-extended.
3069             if scalar.is_bool() {
3070                 attrs.ext(ArgExtension::Zext);
3071                 attrs.set(ArgAttribute::NoUndef);
3072                 return;
3073             }
3074
3075             // Scalars which have invalid values cannot be undef.
3076             if !scalar.is_always_valid(self) {
3077                 attrs.set(ArgAttribute::NoUndef);
3078             }
3079
            // Only pointer types are handled below.
            let Scalar::Initialized { value: Pointer, valid_range } = scalar else { return };
3082
3083             if !valid_range.contains(0) {
3084                 attrs.set(ArgAttribute::NonNull);
3085             }
3086
3087             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3088                 if let Some(kind) = pointee.safe {
3089                     attrs.pointee_align = Some(pointee.align);
3090
                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function, as they can be deallocated
                    // at any time. Set their valid size to 0.
3094                     attrs.pointee_size = match kind {
3095                         PointerKind::UniqueOwned => Size::ZERO,
3096                         _ => pointee.size,
3097                     };
3098
3099                     // `Box`, `&T`, and `&mut T` cannot be undef.
3100                     // Note that this only applies to the value of the pointer itself;
3101                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3102                     attrs.set(ArgAttribute::NoUndef);
3103
                    // `Box` pointer parameters never alias because ownership is
                    // transferred. `&mut` pointer parameters never alias other
                    // parameters or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
3112                     //
3113                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3114                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3115                     // or not to actually emit the attribute. It can also be controlled with the
3116                     // `-Zmutable-noalias` debugging option.
3117                     let no_alias = match kind {
3118                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3119                         PointerKind::UniqueOwned => true,
3120                         PointerKind::Frozen => !is_return,
3121                     };
3122                     if no_alias {
3123                         attrs.set(ArgAttribute::NoAlias);
3124                     }
3125
3126                     if kind == PointerKind::Frozen && !is_return {
3127                         attrs.set(ArgAttribute::ReadOnly);
3128                     }
3129
3130                     if kind == PointerKind::UniqueBorrowed && !is_return {
3131                         attrs.set(ArgAttribute::NoAliasMutRef);
3132                     }
3133                 }
3134             }
3135         };
3136
3137         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3138             let is_return = arg_idx.is_none();
3139
3140             let layout = self.layout_of(ty)?;
3141             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable; it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type
                // `*const/mut dyn Trait` or `&/&mut dyn Trait`, because this is
                // special-cased elsewhere in codegen.
3145                 make_thin_self_ptr(self, layout)
3146             } else {
3147                 layout
3148             };
3149
3150             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3151                 let mut attrs = ArgAttributes::new();
3152                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3153                 attrs
3154             });
3155
3156             if arg.layout.is_zst() {
3157                 // For some forsaken reason, x86_64-pc-windows-gnu
3158                 // doesn't ignore zero-sized struct arguments.
3159                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3160                 if is_return
3161                     || rust_abi
3162                     || (!win_x64_gnu
3163                         && !linux_s390x_gnu_like
3164                         && !linux_sparc64_gnu_like
3165                         && !linux_powerpc_gnu_like)
3166                 {
3167                     arg.mode = PassMode::Ignore;
3168                 }
3169             }
3170
3171             Ok(arg)
3172         };
3173
3174         let mut fn_abi = FnAbi {
3175             ret: arg_of(sig.output(), None)?,
3176             args: inputs
3177                 .iter()
3178                 .copied()
3179                 .chain(extra_args.iter().copied())
3180                 .chain(caller_location)
3181                 .enumerate()
3182                 .map(|(i, ty)| arg_of(ty, Some(i)))
3183                 .collect::<Result<_, _>>()?,
3184             c_variadic: sig.c_variadic,
3185             fixed_count: inputs.len(),
3186             conv,
3187             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3188         };
3189         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3190         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3191         Ok(self.tcx.arena.alloc(fn_abi))
3192     }
3193
3194     fn fn_abi_adjust_for_abi(
3195         &self,
3196         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3197         abi: SpecAbi,
3198     ) -> Result<(), FnAbiError<'tcx>> {
3199         if abi == SpecAbi::Unadjusted {
3200             return Ok(());
3201         }
3202
3203         if abi == SpecAbi::Rust
3204             || abi == SpecAbi::RustCall
3205             || abi == SpecAbi::RustIntrinsic
3206             || abi == SpecAbi::PlatformIntrinsic
3207         {
3208             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3209                 if arg.is_ignore() {
3210                     return;
3211                 }
3212
3213                 match arg.layout.abi {
3214                     Abi::Aggregate { .. } => {}
3215
3216                     // This is a fun case! The gist of what this is doing is
3217                     // that we want callers and callees to always agree on the
3218                     // ABI of how they pass SIMD arguments. If we were to *not*
3219                     // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
3221                     // appropriate ABI is for the callee and the caller. That
3222                     // means, for example, if the caller doesn't have AVX
3223                     // enabled but the callee does, then passing an AVX argument
3224                     // across this boundary would cause corrupt data to show up.
3225                     //
3226                     // This problem is fixed by unconditionally passing SIMD
3227                     // arguments through memory between callers and callees
3228                     // which should get them all to agree on ABI regardless of
3229                     // target feature sets. Some more information about this
3230                     // issue can be found in #44367.
3231                     //
3232                     // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
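                    //
                    // A sketch of the mismatch (illustrative user code, not from
                    // this crate):
                    //
                    //     use std::arch::x86_64::__m256;
                    //     #[target_feature(enable = "avx")]
                    //     unsafe fn callee(v: __m256) { /* ... */ }
                    //
                    // If a caller built without AVX passed `v` as an immediate,
                    // caller and callee could disagree about which registers hold
                    // the value; routing it through memory avoids the mismatch.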
3235                     Abi::Vector { .. }
3236                         if abi != SpecAbi::PlatformIntrinsic
3237                             && self.tcx.sess.target.simd_types_indirect =>
3238                     {
3239                         arg.make_indirect();
3240                         return;
3241                     }
3242
3243                     _ => return,
3244                 }
3245
3246                 let size = arg.layout.size;
3247                 if arg.layout.is_unsized() || size > Pointer.size(self) {
3248                     arg.make_indirect();
3249                 } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
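                    // (Illustrative: a two-byte `(u8, u8)` aggregate is cast to
                    // `Reg { kind: RegKind::Integer, size }` with `size` = 2
                    // bytes, i.e. it is passed like an `i16`.)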
3253                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3254                 }
3255             };
3256             fixup(&mut fn_abi.ret);
3257             for arg in &mut fn_abi.args {
3258                 fixup(arg);
3259             }
3260         } else {
3261             fn_abi.adjust_for_foreign_abi(self, abi)?;
3262         }
3263
3264         Ok(())
3265     }
3266 }
3267
3268 fn make_thin_self_ptr<'tcx>(
3269     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3270     layout: TyAndLayout<'tcx>,
3271 ) -> TyAndLayout<'tcx> {
3272     let tcx = cx.tcx();
3273     let fat_pointer_ty = if layout.is_unsized() {
3274         // unsized `self` is passed as a pointer to `self`
3275         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3276         tcx.mk_mut_ptr(layout.ty)
3277     } else {
3278         match layout.abi {
3279             Abi::ScalarPair(..) => (),
3280             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3281         }
3282
3283         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3284         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3285         // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes
        // until we get a built-in pointer type.
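        //
        // Illustrative descent (assuming the standard library's current
        // representation of `Rc`): `Rc<dyn Trait>` -> `NonNull<RcBox<dyn Trait>>`
        // -> `*const RcBox<dyn Trait>`, at which point `is_unsafe_ptr()` holds
        // and the loop stops.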
3288         let mut fat_pointer_layout = layout;
3289         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3290             && !fat_pointer_layout.ty.is_region_ptr()
3291         {
3292             for i in 0..fat_pointer_layout.fields.count() {
3293                 let field_layout = fat_pointer_layout.field(cx, i);
3294
3295                 if !field_layout.is_zst() {
3296                     fat_pointer_layout = field_layout;
3297                     continue 'descend_newtypes;
3298                 }
3299             }
3300
3301             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3302         }
3303
3304         fat_pointer_layout.ty
3305     };
3306
    // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to
    // that of `*mut ()`, a thin pointer, but keep the same type; this is
    // understood as a special case elsewhere in the compiler.
3310     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3311
3312     TyAndLayout {
3313         ty: fat_pointer_ty,
3314
3315         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3316         // should always work because the type is always `*mut ()`.
3317         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3318     }
3319 }