use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
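    ///
    /// For example, a discriminant range of `0..=255` with no `#[repr]` hint
    /// yields `(I8, false)`, while `-1..=128` yields `(I16, true)`, since `i8`
    /// cannot represent 128.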
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
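
// For example, `&[u8]` is represented as a (data pointer, length) scalar
// pair; the pointer sits at field `FAT_PTR_ADDR` and the length at
// `FAT_PTR_EXTRA`.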

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
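// That is, at most 2^15 = 32768 lanes, the largest count Cranelift's 4-bit
// log-2 encoding can express.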

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
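// For example, `invert_mapping(&[1, 2, 0])` returns `[2, 0, 1]`: the entry
// `map[2] == 0` means the inverse sends `0` back to `2`, and so on.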
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
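    /// Lays out a pair of scalars: `b` is placed after `a`, at `a`'s size
    /// rounded up to `b`'s alignment. For example, on a typical 64-bit target,
    /// a `(u8, u32)` scalar pair puts `b` at offset 4, for a total size of 8
    /// bytes with 4-byte alignment.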
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try to catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use
                // to randomize field ordering with.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }
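
        // For example, with reordering enabled, `struct S { a: u8, b: u32, c: u16 }`
        // is laid out as (b, c, a): sorting by decreasing alignment removes all
        // interior padding, so `S` occupies 8 bytes instead of the 12 it would
        // take in declaration order.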

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then inverse_memory_index[0] is 5,
        // and memory_index[5] should be 0.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a, b);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
            )),
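            // The 0..=1 valid range above is what lets e.g. `Option<bool>` use
            // the values 2..=255 as a niche and stay a single byte in size.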
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = ptr.valid_range.with_start(1);
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = data_ptr.valid_range.with_start(1);
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = vtable.valid_range.with_start(1);
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
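                // For example, `&str` pairs a non-null data pointer with a
                // `usize` length, and `&dyn Trait` pairs it with a non-null
                // vtable pointer.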
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty) else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15), a
                // fixed maximum that backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
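                    // For example, `union U { a: u8, b: u32 }` ends up with size 4
                    // and align 4; since the two field ABIs differ, the loop below
                    // falls back to an aggregate ABI rather than forwarding a
                    // scalar one.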
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
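                    // For example, `NonZeroU32` is annotated with
                    // `#[rustc_layout_scalar_valid_range_start(1)]`, which narrows
                    // the scalar's valid range to 1..=u32::MAX below and creates
                    // the niche that makes `Option<NonZeroU32>` 4 bytes.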
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.start <= start);
                                scalar.valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.end >= end);
                                scalar.valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
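                // For example, `Option<&T>` has `Some` as its only dataful
                // variant; `None` is encoded as the forbidden null value of the
                // reference, so the enum stays pointer-sized with no separate
                // tag field.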
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
                                        } else {
                                            Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
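                        // e.g. with `bits == 8`, a raw value of 0xFF becomes -1.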
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

1191                 // repr(C) on an enum tells us to make a (tag, union) layout,
1192                 // so we need to grow the prefix alignment to be at least
1193                 // the alignment of the union. (This value is used both for
1194                 // determining the alignment of the overall enum, and for
1195                 // determining the alignment of the payload after the tag.)
1196                 let mut prefix_align = min_ity.align(dl).abi;
1197                 if def.repr.c() {
1198                     for fields in &variants {
1199                         for field in fields {
1200                             prefix_align = prefix_align.max(field.align.abi);
1201                         }
1202                     }
1203                 }
1204
1205                 // Create the set of structs that represent each variant.
1206                 let mut layout_variants = variants
1207                     .iter_enumerated()
1208                     .map(|(i, field_layouts)| {
1209                         let mut st = self.univariant_uninterned(
1210                             ty,
1211                             &field_layouts,
1212                             &def.repr,
1213                             StructKind::Prefixed(min_ity.size(), prefix_align),
1214                         )?;
1215                         st.variants = Variants::Single { index: i };
1216                         // Find the first field we can't move later
1217                         // to make room for a larger discriminant.
1218                         for field in
1219                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1220                         {
1221                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1222                                 start_align = start_align.min(field.align.abi);
1223                                 break;
1224                             }
1225                         }
1226                         size = cmp::max(size, st.size);
1227                         align = align.max(st.align);
1228                         Ok(st)
1229                     })
1230                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1231
1232                 // Align the maximum variant size to the largest alignment.
1233                 size = size.align_to(align.abi);
1234
1235                 if size.bytes() >= dl.obj_size_bound() {
1236                     return Err(LayoutError::SizeOverflow(ty));
1237                 }
1238
1239                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1240                 if typeck_ity < min_ity {
1241                     // It is a bug if layout decided on a larger discriminant size than
1242                     // typeck did at this point (based on the values the discriminant can
1243                     // take on), because this discriminant will be loaded and then stored
1244                     // into a variable of the type computed by typeck. Consider such a
1245                     // case (a bug): typeck decided on a byte-sized discriminant, but
1246                     // layout thinks we need 16 bits to store all discriminant values.
1247                     // Then, in codegen, storing this 16-bit discriminant into an 8-bit
1248                     // temporary would have to discard some of the bits needed to
1249                     // represent it (or layout is wrong in thinking it needs 16 bits).
1250                     bug!(
1251                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1252                         min_ity,
1253                         typeck_ity
1254                     );
1255                     // However, it is fine to make the discr type however large (as an optimisation)
1256                     // after this point - we'll just truncate the value we load in codegen.
1257                 }
1258
1259                 // Check to see if we should use a different type for the
1260                 // discriminant. We can safely use a type with the same size
1261                 // as the alignment of the first field of each variant.
1262                 // We increase the size of the discriminant to avoid LLVM copying
1263                 // padding when it doesn't need to; copying padding normally causes
1264                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1265                 // bigger integer size, LLVM can be sure about its contents and
1266                 // won't be so conservative.
1267
1268                 // Use the initial field alignment
1269                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1270                     min_ity
1271                 } else {
1272                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1273                 };
1274
1275                 // If the alignment is not larger than the chosen discriminant size,
1276                 // don't use the alignment as the final size.
1277                 if ity <= min_ity {
1278                     ity = min_ity;
1279                 } else {
1280                     // Patch up the variants' first few fields.
1281                     let old_ity_size = min_ity.size();
1282                     let new_ity_size = ity.size();
1283                     for variant in &mut layout_variants {
1284                         match variant.fields {
1285                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1286                                 for i in offsets {
1287                                     if *i <= old_ity_size {
1288                                         assert_eq!(*i, old_ity_size);
1289                                         *i = new_ity_size;
1290                                     }
1291                                 }
1292                                 // We might be making the struct larger.
1293                                 if variant.size <= old_ity_size {
1294                                     variant.size = new_ity_size;
1295                                 }
1296                             }
1297                             _ => bug!(),
1298                         }
1299                     }
1300                 }
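                // Illustrative example (not from the source): if the tag grew from
                // 1 byte (u8) to 4 bytes (u32) because the first real field is
                // 4-byte aligned anyway, every variant field that sat right after
                // the old tag moves from offset 1 to offset 4; no field can start
                // strictly inside 1..4, which is what the assert above enforces.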
1301
1302                 let tag_mask = ity.size().unsigned_int_max();
1303                 let tag = Scalar {
1304                     value: Int(ity, signed),
1305                     valid_range: WrappingRange {
1306                         start: (min as u128 & tag_mask),
1307                         end: (max as u128 & tag_mask),
1308                     },
1309                 };
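                // Illustrative example (not from the source): for an enum like
                // `enum E { A = -1, B = 0 }` the tag is a signed 8-bit integer, so
                // `tag_mask` is 0xff and the valid range wraps around:
                // `start` = 0xff (-1 truncated to u8) and `end` = 0x00.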
1310                 let mut abi = Abi::Aggregate { sized: true };
1311
1312                 // Without the latter check, aligned enums with custom discriminant values
1313                 // would result in an ICE; see issue #92464 for more info.
1314                 if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1315                     abi = Abi::Scalar(tag);
1316                 } else {
1317                     // Try to use a ScalarPair for all tagged enums.
1318                     let mut common_prim = None;
1319                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1320                         let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1321                             bug!();
1322                         };
1323                         let mut fields =
1324                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1325                         let (field, offset) = match (fields.next(), fields.next()) {
1326                             (None, None) => continue,
1327                             (Some(pair), None) => pair,
1328                             _ => {
1329                                 common_prim = None;
1330                                 break;
1331                             }
1332                         };
1333                         let prim = match field.abi {
1334                             Abi::Scalar(scalar) => scalar.value,
1335                             _ => {
1336                                 common_prim = None;
1337                                 break;
1338                             }
1339                         };
1340                         if let Some(pair) = common_prim {
1341                             // This is pretty conservative. We could go fancier
1342                             // by conflating things like i32 and u32, or even
1343                             // realising that (u8, u8) could just cohabit with
1344                             // u16 or even u32.
1345                             if pair != (prim, offset) {
1346                                 common_prim = None;
1347                                 break;
1348                             }
1349                         } else {
1350                             common_prim = Some((prim, offset));
1351                         }
1352                     }
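                    // Illustrative example (not from the source): for
                    // `enum E { A(u32), B(u32) }`, every variant's single non-ZST
                    // field is a `u32` at the same offset, so `common_prim` survives
                    // the loop and the enum can get `Abi::ScalarPair(tag, u32)`.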
1353                     if let Some((prim, offset)) = common_prim {
1354                         let pair = self.scalar_pair(tag, scalar_unit(prim));
1355                         let pair_offsets = match pair.fields {
1356                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1357                                 assert_eq!(memory_index, &[0, 1]);
1358                                 offsets
1359                             }
1360                             _ => bug!(),
1361                         };
1362                         if pair_offsets[0] == Size::ZERO
1363                             && pair_offsets[1] == *offset
1364                             && align == pair.align
1365                             && size == pair.size
1366                         {
1367                             // We can use `ScalarPair` only when it matches our
1368                             // already computed layout (including `#[repr(C)]`).
1369                             abi = pair.abi;
1370                         }
1371                     }
1372                 }
1373
1374                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1375                     abi = Abi::Uninhabited;
1376                 }
1377
1378                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1379
1380                 let tagged_layout = Layout {
1381                     variants: Variants::Multiple {
1382                         tag,
1383                         tag_encoding: TagEncoding::Direct,
1384                         tag_field: 0,
1385                         variants: layout_variants,
1386                     },
1387                     fields: FieldsShape::Arbitrary {
1388                         offsets: vec![Size::ZERO],
1389                         memory_index: vec![0],
1390                     },
1391                     largest_niche,
1392                     abi,
1393                     align,
1394                     size,
1395                 };
1396
1397                 let best_layout = match (tagged_layout, niche_filling_layout) {
1398                     (tagged_layout, Some(niche_filling_layout)) => {
1399                         // Pick the smaller layout; otherwise,
1400                         // pick the layout with the larger niche; otherwise,
1401                         // pick tagged as it has simpler codegen.
1402                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1403                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1404                             (layout.size, cmp::Reverse(niche_size))
1405                         })
1406                     }
1407                     (tagged_layout, None) => tagged_layout,
1408                 };
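                // Illustrative example (not from the source): for `Option<bool>`,
                // the niche-filling layout is a single byte (`None` encoded as 2,
                // just outside `bool`'s 0..=1 valid range), while the tagged layout
                // needs two bytes, so the comparison above picks niche filling.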
1409
1410                 tcx.intern_layout(best_layout)
1411             }
1412
1413             // Types with no meaningful known layout.
1414             ty::Projection(_) | ty::Opaque(..) => {
1415                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1416                 // if that was possible, so there's no reason to try again here.
1417                 return Err(LayoutError::Unknown(ty));
1418             }
1419
1420             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1421                 bug!("Layout::compute: unexpected type `{}`", ty)
1422             }
1423
1424             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1425                 return Err(LayoutError::Unknown(ty));
1426             }
1427         })
1428     }
1429 }
1430
1431 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1432 #[derive(Clone, Debug, PartialEq)]
1433 enum SavedLocalEligibility {
1434     Unassigned,
1435     Assigned(VariantIdx),
1436     // FIXME: Use newtype_index so we aren't wasting bytes
1437     Ineligible(Option<u32>),
1438 }
1439
1440 // When laying out generators, we divide our saved local fields into two
1441 // categories: overlap-eligible and overlap-ineligible.
1442 //
1443 // Those fields which are ineligible for overlap go in a "prefix" at the
1444 // beginning of the layout, and always have space reserved for them.
1445 //
1446 // Overlap-eligible fields are only assigned to one variant, so we lay
1447 // those fields out for each variant and put them right after the
1448 // prefix.
1449 //
1450 // Finally, in the layout details, we point to the fields from the
1451 // variants they are assigned to. It is possible for some fields to be
1452 // included in multiple variants. No field ever "moves around" in the
1453 // layout; its offset is always the same.
1454 //
1455 // Also included in the layout are the upvars and the discriminant.
1456 // These are included as fields on the "outer" layout; they are not part
1457 // of any variant.
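// Illustrative example (not from the source): a local that is live across two
// different suspension points is recorded in two variants, so it becomes
// `Ineligible` and is stored once in the prefix; a local live across exactly
// one suspension point is `Assigned` to that variant and may share its slot
// with locals assigned to other variants.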
1458 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1459     /// Compute the eligibility and assignment of each local.
1460     fn generator_saved_local_eligibility(
1461         &self,
1462         info: &GeneratorLayout<'tcx>,
1463     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1464         use SavedLocalEligibility::*;
1465
1466         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1467             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1468
1469         // The saved locals not eligible for overlap. These will get
1470         // "promoted" to the prefix of our generator.
1471         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1472
1473         // Figure out which of our saved locals are fields in only
1474         // one variant. The rest are deemed ineligible for overlap.
1475         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1476             for local in fields {
1477                 match assignments[*local] {
1478                     Unassigned => {
1479                         assignments[*local] = Assigned(variant_index);
1480                     }
1481                     Assigned(idx) => {
1482                         // We've already seen this local at another suspension
1483                         // point, so it is no longer a candidate.
1484                         trace!(
1485                             "removing local {:?} in >1 variant ({:?}, {:?})",
1486                             local,
1487                             variant_index,
1488                             idx
1489                         );
1490                         ineligible_locals.insert(*local);
1491                         assignments[*local] = Ineligible(None);
1492                     }
1493                     Ineligible(_) => {}
1494                 }
1495             }
1496         }
1497
1498         // Next, check every pair of eligible locals to see if they
1499         // conflict.
1500         for local_a in info.storage_conflicts.rows() {
1501             let conflicts_a = info.storage_conflicts.count(local_a);
1502             if ineligible_locals.contains(local_a) {
1503                 continue;
1504             }
1505
1506             for local_b in info.storage_conflicts.iter(local_a) {
1507                 // local_a and local_b are storage live at the same time, therefore they
1508                 // cannot overlap in the generator layout. The only way to guarantee
1509                 // this is if they are in the same variant, or one is ineligible
1510                 // (which means it is stored in every variant).
1511                 if ineligible_locals.contains(local_b)
1512                     || assignments[local_a] == assignments[local_b]
1513                 {
1514                     continue;
1515                 }
1516
1517                 // If they conflict, we will choose one to make ineligible.
1518                 // This is not always optimal; it's just a greedy heuristic that
1519                 // seems to produce good results most of the time.
1520                 let conflicts_b = info.storage_conflicts.count(local_b);
1521                 let (remove, other) =
1522                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1523                 ineligible_locals.insert(remove);
1524                 assignments[remove] = Ineligible(None);
1525                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1526             }
1527         }
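        // Illustrative example (not from the source): if `a` conflicts with both
        // `b` and `c`, while `b` and `c` conflict only with `a`, the greedy rule
        // above removes `a` (the higher conflict count), leaving `b` and `c`
        // eligible to overlap.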
1528
1529         // Count the number of variants in use. If only one is used, then it is
1530         // impossible to overlap any locals in our layout. In this case it's
1531         // always better to make the remaining locals ineligible, so we can
1532         // lay them out with the other locals in the prefix and eliminate
1533         // unnecessary padding bytes.
1534         {
1535             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1536             for assignment in &assignments {
1537                 if let Assigned(idx) = assignment {
1538                     used_variants.insert(*idx);
1539                 }
1540             }
1541             if used_variants.count() < 2 {
1542                 for assignment in assignments.iter_mut() {
1543                     *assignment = Ineligible(None);
1544                 }
1545                 ineligible_locals.insert_all();
1546             }
1547         }
1548
1549         // Write down the order of our locals that will be promoted to the prefix.
1550         {
1551             for (idx, local) in ineligible_locals.iter().enumerate() {
1552                 assignments[local] = Ineligible(Some(idx as u32));
1553             }
1554         }
1555         debug!("generator saved local assignments: {:?}", assignments);
1556
1557         (ineligible_locals, assignments)
1558     }
1559
1560     /// Compute the full generator layout.
1561     fn generator_layout(
1562         &self,
1563         ty: Ty<'tcx>,
1564         def_id: hir::def_id::DefId,
1565         substs: SubstsRef<'tcx>,
1566     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1567         use SavedLocalEligibility::*;
1568         let tcx = self.tcx;
1569         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1570
1571         let Some(info) = tcx.generator_layout(def_id) else {
1572             return Err(LayoutError::Unknown(ty));
1573         };
1574         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1575
1576         // Build a prefix layout, including "promoting" all ineligible
1577         // locals as part of the prefix. We compute the layout of all of
1578         // these fields at once to get optimal packing.
1579         let tag_index = substs.as_generator().prefix_tys().count();
1580
1581         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1582         let max_discr = (info.variant_fields.len() - 1) as u128;
1583         let discr_int = Integer::fit_unsigned(max_discr);
1584         let discr_int_ty = discr_int.to_ty(tcx, false);
1585         let tag = Scalar {
1586             value: Primitive::Int(discr_int, false),
1587             valid_range: WrappingRange { start: 0, end: max_discr },
1588         };
1589         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
1590         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
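        // Illustrative example (not from the source): a generator with a single
        // suspension point has four variants (the reserved unresumed, returned,
        // and poisoned variants, plus one suspend state), so `max_discr == 3`,
        // `discr_int` is `I8`, and the tag's valid range is 0..=3.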
1591
1592         let promoted_layouts = ineligible_locals
1593             .iter()
1594             .map(|local| subst_field(info.field_tys[local]))
1595             .map(|ty| tcx.mk_maybe_uninit(ty))
1596             .map(|ty| self.layout_of(ty));
1597         let prefix_layouts = substs
1598             .as_generator()
1599             .prefix_tys()
1600             .map(|ty| self.layout_of(ty))
1601             .chain(iter::once(Ok(tag_layout)))
1602             .chain(promoted_layouts)
1603             .collect::<Result<Vec<_>, _>>()?;
1604         let prefix = self.univariant_uninterned(
1605             ty,
1606             &prefix_layouts,
1607             &ReprOptions::default(),
1608             StructKind::AlwaysSized,
1609         )?;
1610
1611         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1612
1613         // Split the prefix layout into the "outer" fields (upvars and
1614         // discriminant) and the "promoted" fields. Promoted fields will
1615         // get included in each variant that requested them in
1616         // GeneratorLayout.
1617         debug!("prefix = {:#?}", prefix);
1618         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1619             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1620                 let mut inverse_memory_index = invert_mapping(&memory_index);
1621
1622                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1623                 // "outer" and "promoted" fields respectively.
1624                 let b_start = (tag_index + 1) as u32;
1625                 let offsets_b = offsets.split_off(b_start as usize);
1626                 let offsets_a = offsets;
1627
1628                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1629                 // by preserving the order but keeping only one disjoint "half" each.
1630                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1631                 let inverse_memory_index_b: Vec<_> =
1632                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1633                 inverse_memory_index.retain(|&i| i < b_start);
1634                 let inverse_memory_index_a = inverse_memory_index;
1635
1636                 // Since `inverse_memory_index_{a,b}` each only refer to their
1637                 // respective fields, they can be safely inverted
1638                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1639                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1640
1641                 let outer_fields =
1642                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1643                 (outer_fields, offsets_b, memory_index_b)
1644             }
1645             _ => bug!(),
1646         };
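        // Illustrative sketch (not from the source): with `tag_index` prefix
        // fields followed by the tag, `b_start == tag_index + 1`; entries below
        // `b_start` describe the "outer" fields (upvars and tag) and entries from
        // `b_start` onward describe the promoted ineligible locals.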
1647
1648         let mut size = prefix.size;
1649         let mut align = prefix.align;
1650         let variants = info
1651             .variant_fields
1652             .iter_enumerated()
1653             .map(|(index, variant_fields)| {
1654                 // Only include overlap-eligible fields when we compute our variant layout.
1655                 let variant_only_tys = variant_fields
1656                     .iter()
1657                     .filter(|local| match assignments[**local] {
1658                         Unassigned => bug!(),
1659                         Assigned(v) if v == index => true,
1660                         Assigned(_) => bug!("assignment does not match variant"),
1661                         Ineligible(_) => false,
1662                     })
1663                     .map(|local| subst_field(info.field_tys[*local]));
1664
1665                 let mut variant = self.univariant_uninterned(
1666                     ty,
1667                     &variant_only_tys
1668                         .map(|ty| self.layout_of(ty))
1669                         .collect::<Result<Vec<_>, _>>()?,
1670                     &ReprOptions::default(),
1671                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1672                 )?;
1673                 variant.variants = Variants::Single { index };
1674
1675                 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1676                     bug!();
1677                 };
1678
1679                 // Now, stitch the promoted and variant-only fields back together in
1680                 // the order they are mentioned by our GeneratorLayout.
1681                 // Because we only use some subset (that can differ between variants)
1682                 // of the promoted fields, we can't just pick those elements of the
1683                 // `promoted_memory_index` (as we'd end up with gaps).
1684                 // So instead, we build an "inverse memory_index", as if all of the
1685                 // promoted fields were being used, but leave the elements not in the
1686                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1687                 // obtain a valid (bijective) mapping.
1688                 const INVALID_FIELD_IDX: u32 = !0;
1689                 let mut combined_inverse_memory_index =
1690                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1691                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1692                 let combined_offsets = variant_fields
1693                     .iter()
1694                     .enumerate()
1695                     .map(|(i, local)| {
1696                         let (offset, memory_index) = match assignments[*local] {
1697                             Unassigned => bug!(),
1698                             Assigned(_) => {
1699                                 let (offset, memory_index) =
1700                                     offsets_and_memory_index.next().unwrap();
1701                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1702                             }
1703                             Ineligible(field_idx) => {
1704                                 let field_idx = field_idx.unwrap() as usize;
1705                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1706                             }
1707                         };
1708                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1709                         offset
1710                     })
1711                     .collect();
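                // Tiny worked example (illustrative, not from the source): with two
                // promoted fields of which this variant uses only the second, plus
                // one variant-only field, the combined inverse index is built as
                // [INVALID, 0, 1]; the INVALID slot is dropped below and inverting
                // the remaining [0, 1] yields the final `memory_index`.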
1712
1713                 // Remove the unused slots and invert the mapping to obtain the
1714                 // combined `memory_index` (also see previous comment).
1715                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1716                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1717
1718                 variant.fields = FieldsShape::Arbitrary {
1719                     offsets: combined_offsets,
1720                     memory_index: combined_memory_index,
1721                 };
1722
1723                 size = size.max(variant.size);
1724                 align = align.max(variant.align);
1725                 Ok(variant)
1726             })
1727             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1728
1729         size = size.align_to(align.abi);
1730
1731         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1732         {
1733             Abi::Uninhabited
1734         } else {
1735             Abi::Aggregate { sized: true }
1736         };
1737
1738         let layout = tcx.intern_layout(Layout {
1739             variants: Variants::Multiple {
1740                 tag,
1741                 tag_encoding: TagEncoding::Direct,
1742                 tag_field: tag_index,
1743                 variants,
1744             },
1745             fields: outer_fields,
1746             abi,
1747             largest_niche: prefix.largest_niche,
1748             size,
1749             align,
1750         });
1751         debug!("generator layout ({:?}): {:#?}", ty, layout);
1752         Ok(layout)
1753     }
1754
1755     /// This is invoked by the `layout_of` query to record the final
1756     /// layout of each type.
1757     #[inline(always)]
1758     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1759         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1760         // for dumping later.
1761         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1762             self.record_layout_for_printing_outlined(layout)
1763         }
1764     }
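    // Illustrative usage (not from the source): compiling with
    // `RUSTFLAGS="-Zprint-type-sizes" cargo +nightly build` causes the layouts
    // recorded here to be printed per type at the end of the session.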
1765
1766     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1767         // Ignore layouts computed in non-empty parameter environments, or
1768         // non-monomorphic layouts, as the user only wants to see the stuff
1769         // resulting from the final codegen session.
1770         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1771             return;
1772         }
1773
1774         // (delay format until we actually need it)
1775         let record = |kind, packed, opt_discr_size, variants| {
1776             let type_desc = format!("{:?}", layout.ty);
1777             self.tcx.sess.code_stats.record_type_size(
1778                 kind,
1779                 type_desc,
1780                 layout.align.abi,
1781                 layout.size,
1782                 packed,
1783                 opt_discr_size,
1784                 variants,
1785             );
1786         };
1787
1788         let adt_def = match *layout.ty.kind() {
1789             ty::Adt(ref adt_def, _) => {
1790                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1791                 adt_def
1792             }
1793
1794             ty::Closure(..) => {
1795                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1796                 record(DataTypeKind::Closure, false, None, vec![]);
1797                 return;
1798             }
1799
1800             _ => {
1801                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1802                 return;
1803             }
1804         };
1805
1806         let adt_kind = adt_def.adt_kind();
1807         let adt_packed = adt_def.repr.pack.is_some();
1808
1809         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1810             let mut min_size = Size::ZERO;
1811             let field_info: Vec<_> = flds
1812                 .iter()
1813                 .enumerate()
1814                 .map(|(i, &name)| {
1815                     let field_layout = layout.field(self, i);
1816                     let offset = layout.fields.offset(i);
1817                     let field_end = offset + field_layout.size;
1818                     if min_size < field_end {
1819                         min_size = field_end;
1820                     }
1821                     FieldInfo {
1822                         name: name.to_string(),
1823                         offset: offset.bytes(),
1824                         size: field_layout.size.bytes(),
1825                         align: field_layout.align.abi.bytes(),
1826                     }
1827                 })
1828                 .collect();
1829
1830             VariantInfo {
1831                 name: n.map(|n| n.to_string()),
1832                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1833                 align: layout.align.abi.bytes(),
1834                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1835                 fields: field_info,
1836             }
1837         };
1838
1839         match layout.variants {
1840             Variants::Single { index } => {
1841                 if !adt_def.variants.is_empty() && layout.fields != FieldsShape::Primitive {
1842                     debug!(
1843                         "print-type-size `{:#?}` variant {}",
1844                         layout, adt_def.variants[index].name
1845                     );
1846                     let variant_def = &adt_def.variants[index];
1847                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1848                     record(
1849                         adt_kind.into(),
1850                         adt_packed,
1851                         None,
1852                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1853                     );
1854                 } else {
1855                     // (This case arises for *empty* enums, so give it
1856                     // zero variants.)
1857                     record(adt_kind.into(), adt_packed, None, vec![]);
1858                 }
1859             }
1860
1861             Variants::Multiple { tag, ref tag_encoding, .. } => {
1862                 debug!(
1863                     "print-type-size `{:#?}` adt general variants def {}",
1864                     layout.ty,
1865                     adt_def.variants.len()
1866                 );
1867                 let variant_infos: Vec<_> = adt_def
1868                     .variants
1869                     .iter_enumerated()
1870                     .map(|(i, variant_def)| {
1871                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1872                         build_variant_info(
1873                             Some(variant_def.name),
1874                             &fields,
1875                             layout.for_variant(self, i),
1876                         )
1877                     })
1878                     .collect();
1879                 record(
1880                     adt_kind.into(),
1881                     adt_packed,
1882                     match tag_encoding {
1883                         TagEncoding::Direct => Some(tag.value.size(self)),
1884                         _ => None,
1885                     },
1886                     variant_infos,
1887                 );
1888             }
1889         }
1890     }
1891 }
1892
1893 /// Type size "skeleton", i.e., the only information determining a type's size.
1894 /// While this is conservative (aside from constant sizes, only pointers,
1895 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1896 /// enough to statically check common use cases of transmute.
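///
/// A hedged sketch of the kind of code this enables (illustrative, not a
/// doctest; `demote` is a hypothetical function):
///
/// ```ignore (illustrative)
/// unsafe fn demote<T: ?Sized>(x: &T) -> *const T {
///     // `layout_of` cannot size `&T` here (it depends on `T`'s tail), but
///     // both sides have skeleton `Pointer { tail: T, .. }`, so the
///     // transmute size check succeeds.
///     std::mem::transmute(x)
/// }
/// ```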
1897 #[derive(Copy, Clone, Debug)]
1898 pub enum SizeSkeleton<'tcx> {
1899     /// Any statically computable Layout.
1900     Known(Size),
1901
1902     /// A potentially-fat pointer.
1903     Pointer {
1904         /// If true, this pointer is never null.
1905         non_zero: bool,
1906         /// The type which determines the unsized metadata, if any,
1907         /// of this pointer. Either a type parameter or a projection
1908         /// depending on one, with regions erased.
1909         tail: Ty<'tcx>,
1910     },
1911 }
1912
1913 impl<'tcx> SizeSkeleton<'tcx> {
1914     pub fn compute(
1915         ty: Ty<'tcx>,
1916         tcx: TyCtxt<'tcx>,
1917         param_env: ty::ParamEnv<'tcx>,
1918     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1919         debug_assert!(!ty.has_infer_types_or_consts());
1920
1921         // First try computing a static layout.
1922         let err = match tcx.layout_of(param_env.and(ty)) {
1923             Ok(layout) => {
1924                 return Ok(SizeSkeleton::Known(layout.size));
1925             }
1926             Err(err) => err,
1927         };
1928
1929         match *ty.kind() {
1930             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1931                 let non_zero = !ty.is_unsafe_ptr();
1932                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1933                 match tail.kind() {
1934                     ty::Param(_) | ty::Projection(_) => {
1935                         debug_assert!(tail.has_param_types_or_consts());
1936                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1937                     }
1938                     _ => bug!(
1939                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1940                               tail `{}` is not a type parameter or a projection",
1941                         ty,
1942                         err,
1943                         tail
1944                     ),
1945                 }
1946             }
1947
1948             ty::Adt(def, substs) => {
1949                 // Only newtypes and enums w/ nullable pointer optimization.
1950                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1951                     return Err(err);
1952                 }
1953
1954                 // Get a zero-sized variant or a pointer newtype.
1955                 let zero_or_ptr_variant = |i| {
1956                     let i = VariantIdx::new(i);
1957                     let fields = def.variants[i]
1958                         .fields
1959                         .iter()
1960                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1961                     let mut ptr = None;
1962                     for field in fields {
1963                         let field = field?;
1964                         match field {
1965                             SizeSkeleton::Known(size) => {
1966                                 if size.bytes() > 0 {
1967                                     return Err(err);
1968                                 }
1969                             }
1970                             SizeSkeleton::Pointer { .. } => {
1971                                 if ptr.is_some() {
1972                                     return Err(err);
1973                                 }
1974                                 ptr = Some(field);
1975                             }
1976                         }
1977                     }
1978                     Ok(ptr)
1979                 };
1980
1981                 let v0 = zero_or_ptr_variant(0)?;
1982                 // Newtype.
1983                 if def.variants.len() == 1 {
1984                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1985                         return Ok(SizeSkeleton::Pointer {
1986                             non_zero: non_zero
1987                                 || match tcx.layout_scalar_valid_range(def.did) {
1988                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1989                                     (Bound::Included(start), Bound::Included(end)) => {
1990                                         0 < start && start < end
1991                                     }
1992                                     _ => false,
1993                                 },
1994                             tail,
1995                         });
1996                     } else {
1997                         return Err(err);
1998                     }
1999                 }
2000
2001                 let v1 = zero_or_ptr_variant(1)?;
2002                 // Nullable pointer enum optimization.
2003                 match (v0, v1) {
2004                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2005                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2006                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2007                     }
2008                     _ => Err(err),
2009                 }
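                // Illustrative example (not from the source): for `Option<&T>` with
                // generic `T`, `None` contributes no fields and `Some`'s only field
                // has a `Pointer { non_zero: true, .. }` skeleton, so the match
                // above yields a nullable, pointer-sized skeleton for the enum.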
2010             }
2011
2012             ty::Projection(_) | ty::Opaque(..) => {
2013                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2014                 if ty == normalized {
2015                     Err(err)
2016                 } else {
2017                     SizeSkeleton::compute(normalized, tcx, param_env)
2018                 }
2019             }
2020
2021             _ => Err(err),
2022         }
2023     }
2024
2025     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2026         match (self, other) {
2027             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2028             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2029                 a == b
2030             }
2031             _ => false,
2032         }
2033     }
2034 }
2035
2036 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2037     fn tcx(&self) -> TyCtxt<'tcx>;
2038 }
2039
2040 pub trait HasParamEnv<'tcx> {
2041     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2042 }
2043
2044 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2045     #[inline]
2046     fn data_layout(&self) -> &TargetDataLayout {
2047         &self.data_layout
2048     }
2049 }
2050
2051 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2052     fn target_spec(&self) -> &Target {
2053         &self.sess.target
2054     }
2055 }
2056
2057 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2058     #[inline]
2059     fn tcx(&self) -> TyCtxt<'tcx> {
2060         *self
2061     }
2062 }
2063
2064 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2065     #[inline]
2066     fn data_layout(&self) -> &TargetDataLayout {
2067         &self.data_layout
2068     }
2069 }
2070
2071 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2072     fn target_spec(&self) -> &Target {
2073         &self.sess.target
2074     }
2075 }
2076
2077 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2078     #[inline]
2079     fn tcx(&self) -> TyCtxt<'tcx> {
2080         **self
2081     }
2082 }
2083
2084 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2085     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2086         self.param_env
2087     }
2088 }
2089
2090 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2091     fn data_layout(&self) -> &TargetDataLayout {
2092         self.tcx.data_layout()
2093     }
2094 }
2095
2096 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2097     fn target_spec(&self) -> &Target {
2098         self.tcx.target_spec()
2099     }
2100 }
2101
2102 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2103     fn tcx(&self) -> TyCtxt<'tcx> {
2104         self.tcx.tcx()
2105     }
2106 }
2107
2108 pub trait MaybeResult<T> {
2109     type Error;
2110
2111     fn from(x: Result<T, Self::Error>) -> Self;
2112     fn to_result(self) -> Result<T, Self::Error>;
2113 }
2114
2115 impl<T> MaybeResult<T> for T {
2116     type Error = !;
2117
2118     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2119         x
2120     }
2121     fn to_result(self) -> Result<T, Self::Error> {
2122         Ok(self)
2123     }
2124 }
2125
2126 impl<T, E> MaybeResult<T> for Result<T, E> {
2127     type Error = E;
2128
2129     fn from(x: Result<T, Self::Error>) -> Self {
2130         x
2131     }
2132     fn to_result(self) -> Result<T, Self::Error> {
2133         self
2134     }
2135 }
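// Illustrative note (not from the source): the two impls above let `layout_of`
// return either a bare `TyAndLayout` (with `Error = !`, for contexts that turn
// layout errors into fatal errors) or a `Result<TyAndLayout, LayoutError>`,
// without duplicating the query plumbing.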
2136
2137 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2138
2139 /// Trait for contexts that want to be able to compute layouts of types.
2140 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2141 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2142     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2143     /// returned from `layout_of` (see also `handle_layout_err`).
2144     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2145
2146     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2147     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2148     #[inline]
2149     fn layout_tcx_at_span(&self) -> Span {
2150         DUMMY_SP
2151     }
2152
2153     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2154     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2155     ///
2156     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2157     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2158     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2159     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2160     fn handle_layout_err(
2161         &self,
2162         err: LayoutError<'tcx>,
2163         span: Span,
2164         ty: Ty<'tcx>,
2165     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2166 }
2167
2168 /// Blanket extension trait for contexts that can compute layouts of types.
2169 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2170     /// Computes the layout of a type. Note that this implicitly
2171     /// executes in "reveal all" mode, and will normalize the input type.
2172     #[inline]
2173     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2174         self.spanned_layout_of(ty, DUMMY_SP)
2175     }
2176
2177     /// Computes the layout of a type, at `span`. Note that this implicitly
2178     /// executes in "reveal all" mode, and will normalize the input type.
2179     // FIXME(eddyb) avoid passing information like this, and instead add more
2180     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2181     #[inline]
2182     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2183         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2184         let tcx = self.tcx().at(span);
2185
2186         MaybeResult::from(
2187             tcx.layout_of(self.param_env().and(ty))
2188                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2189         )
2190     }
2191 }
2192
2193 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
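// Hedged sketch (illustrative; `MyCx` is hypothetical and not part of this
// file): a codegen-like context that wants `cx.layout_of(ty)` to return a bare
// `TyAndLayout` can pick the infallible result type and turn errors fatal:
//
//     impl<'tcx> LayoutOfHelpers<'tcx> for MyCx<'tcx> {
//         type LayoutOfResult = TyAndLayout<'tcx>;
//
//         fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, ty: Ty<'tcx>) -> ! {
//             self.tcx().sess.fatal(&format!("layout error for {}: {}", ty, err))
//         }
//     }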
2194
2195 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2196     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2197
2198     #[inline]
2199     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2200         err
2201     }
2202 }
2203
2204 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2205     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2206
2207     #[inline]
2208     fn layout_tcx_at_span(&self) -> Span {
2209         self.tcx.span
2210     }
2211
2212     #[inline]
2213     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2214         err
2215     }
2216 }
2217
2218 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2219 where
2220     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2221 {
2222     fn ty_and_layout_for_variant(
2223         this: TyAndLayout<'tcx>,
2224         cx: &C,
2225         variant_index: VariantIdx,
2226     ) -> TyAndLayout<'tcx> {
2227         let layout = match this.variants {
2228             Variants::Single { index }
2229                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2230                 if index == variant_index &&
2231                 // Don't confuse variants of uninhabited enums with the enum itself.
2232                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2233                 this.fields != FieldsShape::Primitive =>
2234             {
2235                 this.layout
2236             }
2237
2238             Variants::Single { index } => {
2239                 let tcx = cx.tcx();
2240                 let param_env = cx.param_env();
2241
2242                 // Deny calling for_variant more than once for non-Single enums.
2243                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2244                     assert_eq!(original_layout.variants, Variants::Single { index });
2245                 }
2246
2247                 let fields = match this.ty.kind() {
2248                     ty::Adt(def, _) if def.variants.is_empty() =>
2249                         bug!("for_variant called on zero-variant enum"),
2250                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2251                     _ => bug!(),
2252                 };
2253                 tcx.intern_layout(Layout {
2254                     variants: Variants::Single { index: variant_index },
2255                     fields: match NonZeroUsize::new(fields) {
2256                         Some(fields) => FieldsShape::Union(fields),
2257                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2258                     },
2259                     abi: Abi::Uninhabited,
2260                     largest_niche: None,
2261                     align: tcx.data_layout.i8_align,
2262                     size: Size::ZERO,
2263                 })
2264             }
2265
2266             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2267         };
2268
2269         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2270
2271         TyAndLayout { ty: this.ty, layout }
2272     }
2273
2274     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2275         enum TyMaybeWithLayout<'tcx> {
2276             Ty(Ty<'tcx>),
2277             TyAndLayout(TyAndLayout<'tcx>),
2278         }
2279
2280         fn field_ty_or_layout<'tcx>(
2281             this: TyAndLayout<'tcx>,
2282             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2283             i: usize,
2284         ) -> TyMaybeWithLayout<'tcx> {
2285             let tcx = cx.tcx();
2286             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2287                 let layout = Layout::scalar(cx, tag);
2288                 TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
2289             };
2290
2291             match *this.ty.kind() {
2292                 ty::Bool
2293                 | ty::Char
2294                 | ty::Int(_)
2295                 | ty::Uint(_)
2296                 | ty::Float(_)
2297                 | ty::FnPtr(_)
2298                 | ty::Never
2299                 | ty::FnDef(..)
2300                 | ty::GeneratorWitness(..)
2301                 | ty::Foreign(..)
2302                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2303
2304                 // Potentially-fat pointers.
2305                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2306                     assert!(i < this.fields.count());
2307
2308                     // Reuse the fat `*T` type as its own thin pointer data field.
2309                     // This provides information about, e.g., DST struct pointees
2310                     // (which may have no non-DST form), and will work as long
2311                     // as the `Abi` or `FieldsShape` is checked by users.
2312                     if i == 0 {
2313                         let nil = tcx.mk_unit();
2314                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2315                             tcx.mk_mut_ptr(nil)
2316                         } else {
2317                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2318                         };
2319
2320                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2321                         // the `Result` should always work because the type is
2322                         // always either `*mut ()` or `&'static mut ()`.
2323                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2324                             ty: this.ty,
2325                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2326                         });
2327                     }
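                    // Illustrative example (not from the source): for `&[u8]`,
                    // field 0 is the data pointer (described above via the `&[u8]`
                    // type itself with a thin-pointer layout), and field 1 is the
                    // `usize` length metadata produced by the match below.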
2328
2329                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2330                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2331                         ty::Dynamic(_, _) => {
2332                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2333                                 tcx.lifetimes.re_static,
2334                                 tcx.mk_array(tcx.types.usize, 3),
2335                             ))
2336                             /* FIXME: use actual fn pointers
2337                             Warning: naively computing the number of entries in the
2338                             vtable by counting the methods on the trait + methods on
2339                             all parent traits does not work, because some methods can
2340                             be not object safe and thus excluded from the vtable.
2341                             Increase this counter if you tried to implement this but
2342                             failed to do it without duplicating a lot of code from
2343                             other places in the compiler: 2
2344                             tcx.mk_tup(&[
2345                                 tcx.mk_array(tcx.types.usize, 3),
2346                                 tcx.mk_array(Option<fn()>),
2347                             ])
2348                             */
2349                         }
2350                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2351                     }
2352                 }
2353
2354                 // Arrays and slices.
2355                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2356                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2357
2358                 // Tuples, generators and closures.
2359                 ty::Closure(_, ref substs) => field_ty_or_layout(
2360                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2361                     cx,
2362                     i,
2363                 ),
2364
2365                 ty::Generator(def_id, ref substs, _) => match this.variants {
2366                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2367                         substs
2368                             .as_generator()
2369                             .state_tys(def_id, tcx)
2370                             .nth(index.as_usize())
2371                             .unwrap()
2372                             .nth(i)
2373                             .unwrap(),
2374                     ),
2375                     Variants::Multiple { tag, tag_field, .. } => {
2376                         if i == tag_field {
2377                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2378                         }
2379                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2380                     }
2381                 },
2382
2383                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2384
2385                 // ADTs.
2386                 ty::Adt(def, substs) => {
2387                     match this.variants {
2388                         Variants::Single { index } => {
2389                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2390                         }
2391
2392                         // Discriminant field for enums (where applicable).
2393                         Variants::Multiple { tag, .. } => {
2394                             assert_eq!(i, 0);
2395                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2396                         }
2397                     }
2398                 }
2399
2400                 ty::Projection(_)
2401                 | ty::Bound(..)
2402                 | ty::Placeholder(..)
2403                 | ty::Opaque(..)
2404                 | ty::Param(_)
2405                 | ty::Infer(_)
2406                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2407             }
2408         }
2409
2410         match field_ty_or_layout(this, cx, i) {
2411             TyMaybeWithLayout::Ty(field_ty) => {
2412                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2413                     bug!(
2414                         "failed to get layout for `{}`: {},\n\
2415                          despite it being a field (#{}) of an existing layout: {:#?}",
2416                         field_ty,
2417                         e,
2418                         i,
2419                         this
2420                     )
2421                 })
2422             }
2423             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2424         }
2425     }
2426
2427     fn ty_and_layout_pointee_info_at(
2428         this: TyAndLayout<'tcx>,
2429         cx: &C,
2430         offset: Size,
2431     ) -> Option<PointeeInfo> {
2432         let tcx = cx.tcx();
2433         let param_env = cx.param_env();
2434
2435         let addr_space_of_ty = |ty: Ty<'tcx>| {
2436             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2437         };
2438
2439         let pointee_info = match *this.ty.kind() {
2440             ty::RawPtr(mt) if offset.bytes() == 0 => {
2441                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2442                     size: layout.size,
2443                     align: layout.align.abi,
2444                     safe: None,
2445                     address_space: addr_space_of_ty(mt.ty),
2446                 })
2447             }
2448             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2449                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2450                     size: layout.size,
2451                     align: layout.align.abi,
2452                     safe: None,
2453                     address_space: cx.data_layout().instruction_address_space,
2454                 })
2455             }
2456             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2457                 let address_space = addr_space_of_ty(ty);
2458                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2459                     // Use conservative pointer kind if not optimizing. This saves us the
2460                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2461                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2462                     PointerKind::Shared
2463                 } else {
2464                     match mt {
2465                         hir::Mutability::Not => {
2466                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2467                                 PointerKind::Frozen
2468                             } else {
2469                                 PointerKind::Shared
2470                             }
2471                         }
2472                         hir::Mutability::Mut => {
2473                             // References to self-referential structures should not be considered
2474                             // noalias, as another pointer to the structure can be obtained that
2475                             // is not based on the original reference. We consider all !Unpin
2476                             // types to be potentially self-referential here.
2477                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2478                                 PointerKind::UniqueBorrowed
2479                             } else {
2480                                 PointerKind::Shared
2481                             }
2482                         }
2483                     }
2484                 };
2485
2486                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2487                     size: layout.size,
2488                     align: layout.align.abi,
2489                     safe: Some(kind),
2490                     address_space,
2491                 })
2492             }
2493
2494             _ => {
2495                 let mut data_variant = match this.variants {
2496                     // Within the discriminant field, only the niche itself is
2497                     // always initialized, so we only check for a pointer at its
2498                     // offset.
2499                     //
2500                     // If the niche is a pointer, it's either valid (according
2501                     // to its type), or null (which the niche field's scalar
2502                     // validity range encodes).  This allows using
2503                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2504                     // this will continue to work as long as we don't start
2505                     // using more niches than just null (e.g., the first page of
2506                     // the address space, or unaligned pointers).
2507                     Variants::Multiple {
2508                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2509                         tag_field,
2510                         ..
2511                     } if this.fields.offset(tag_field) == offset => {
2512                         Some(this.for_variant(cx, dataful_variant))
2513                     }
2514                     _ => Some(this),
2515                 };
2516
2517                 if let Some(variant) = data_variant {
2518                     // We're not interested in any unions.
2519                     if let FieldsShape::Union(_) = variant.fields {
2520                         data_variant = None;
2521                     }
2522                 }
2523
2524                 let mut result = None;
2525
2526                 if let Some(variant) = data_variant {
2527                     let ptr_end = offset + Pointer.size(cx);
2528                     for i in 0..variant.fields.count() {
2529                         let field_start = variant.fields.offset(i);
2530                         if field_start <= offset {
2531                             let field = variant.field(cx, i);
2532                             result = field.to_result().ok().and_then(|field| {
2533                                 if ptr_end <= field_start + field.size {
2534                                     // We found the right field, look inside it.
2535                                     let field_info =
2536                                         field.pointee_info_at(cx, offset - field_start);
2537                                     field_info
2538                                 } else {
2539                                     None
2540                                 }
2541                             });
2542                             if result.is_some() {
2543                                 break;
2544                             }
2545                         }
2546                     }
2547                 }
2548
2549                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2550                 if let Some(ref mut pointee) = result {
2551                     if let ty::Adt(def, _) = this.ty.kind() {
2552                         if def.is_box() && offset.bytes() == 0 {
2553                             pointee.safe = Some(PointerKind::UniqueOwned);
2554                         }
2555                     }
2556                 }
2557
2558                 result
2559             }
2560         };
2561
2562         debug!(
2563             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2564             offset,
2565             this.ty.kind(),
2566             pointee_info
2567         );
2568
2569         pointee_info
2570     }
2571 }
2572
2573 impl<'tcx> ty::Instance<'tcx> {
2574     // NOTE(eddyb) this is private to avoid using it from outside of
2575     // `fn_abi_of_instance` - any other uses are either too high-level
2576     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2577     // or should go through `FnAbi` instead, to avoid losing any
2578     // adjustments `fn_abi_of_instance` might be performing.
2579     fn fn_sig_for_fn_abi(
2580         &self,
2581         tcx: TyCtxt<'tcx>,
2582         param_env: ty::ParamEnv<'tcx>,
2583     ) -> ty::PolyFnSig<'tcx> {
2584         let ty = self.ty(tcx, param_env);
2585         match *ty.kind() {
2586             ty::FnDef(..) => {
2587                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2588                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2589                 // (i.e. due to being inside a projection that got normalized, see
2590                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2591                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2592                 let mut sig = match *ty.kind() {
2593                     ty::FnDef(def_id, substs) => tcx
2594                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2595                         .subst(tcx, substs),
2596                     _ => unreachable!(),
2597                 };
2598
2599                 if let ty::InstanceDef::VtableShim(..) = self.def {
2600                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2601                     sig = sig.map_bound(|mut sig| {
2602                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2603                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2604                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2605                         sig
2606                     });
2607                 }
2608                 sig
2609             }
2610             ty::Closure(def_id, substs) => {
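                // A closure is lowered to a function whose first input is its
                // environment: below, the signature's bound vars are extended
                // with a fresh `BrEnv` region and the env type built from it
                // is prepended to the inputs.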
2611                 let sig = substs.as_closure().sig();
2612
2613                 let bound_vars = tcx.mk_bound_variable_kinds(
2614                     sig.bound_vars()
2615                         .iter()
2616                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2617                 );
2618                 let br = ty::BoundRegion {
2619                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2620                     kind: ty::BoundRegionKind::BrEnv,
2621                 };
2622                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2623                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2624
2625                 let sig = sig.skip_binder();
2626                 ty::Binder::bind_with_vars(
2627                     tcx.mk_fn_sig(
2628                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2629                         sig.output(),
2630                         sig.c_variadic,
2631                         sig.unsafety,
2632                         sig.abi,
2633                     ),
2634                     bound_vars,
2635                 )
2636             }
2637             ty::Generator(_, substs, _) => {
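                // A generator is lowered to its resume function, shaped like
                // `fn(Pin<&mut Self>, Resume) -> GeneratorState<Yield, Return>`;
                // `Pin` and `GeneratorState` are resolved via lang items below.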
2638                 let sig = substs.as_generator().poly_sig();
2639
2640                 let bound_vars = tcx.mk_bound_variable_kinds(
2641                     sig.bound_vars()
2642                         .iter()
2643                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2644                 );
2645                 let br = ty::BoundRegion {
2646                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2647                     kind: ty::BoundRegionKind::BrEnv,
2648                 };
2649                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2650                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2651
2652                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2653                 let pin_adt_ref = tcx.adt_def(pin_did);
2654                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2655                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2656
2657                 let sig = sig.skip_binder();
2658                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2659                 let state_adt_ref = tcx.adt_def(state_did);
2660                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2661                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2662                 ty::Binder::bind_with_vars(
2663                     tcx.mk_fn_sig(
2664                         [env_ty, sig.resume_ty].iter(),
2665                         &ret_ty,
2666                         false,
2667                         hir::Unsafety::Normal,
2668                         rustc_target::spec::abi::Abi::Rust,
2669                     ),
2670                     bound_vars,
2671                 )
2672             }
2673             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2674         }
2675     }
2676 }
2677
2678 /// Calculates whether a function's ABI can unwind or not.
2679 ///
2680 /// This takes two primary parameters:
2681 ///
2682 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2683 ///   codegen attrs for a defined function. For function pointers this set of
2684 ///   flags is the empty set. This is only applicable for Rust-defined
2685 ///   functions, and generally isn't needed except for small optimizations where
2686 ///   we want to mark a function that otherwise looks like it could unwind as
2687 ///   one that doesn't actually unwind (such as intrinsics).
2688 ///
2689 /// * `abi` - this is the ABI that the function is defined with. This is the
2690 ///   primary factor for determining whether a function can unwind or not.
2691 ///
2692 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2693 /// panics are implemented with unwinds on most platforms (when
2694 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2695 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2696 /// specifically in the name (e.g. `"C-unwind"`). What unwinding entails is
2697 /// defined for each ABI individually, but it always corresponds to some form of
2698 /// stack-based unwinding (the exact mechanism of which varies
2699 /// platform-by-platform).
2700 ///
2701 /// Rust functions are classified as able to unwind or not based on the
2702 /// active "panic strategy". In other words Rust functions are considered to
2703 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2704 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2705 /// only if the final panic mode is panic=abort. In this scenario any code
2706 /// previously compiled assuming that a function can unwind is still correct, it
2707 /// just never happens to actually unwind at runtime.
2708 ///
2709 /// This function's answer to whether or not a function can unwind is quite
2710 /// impactful throughout the compiler. This affects things like:
2711 ///
2712 /// * Calling a function which can't unwind means codegen simply ignores any
2713 ///   associated unwinding cleanup.
2714 /// * Calling a function which can unwind from a function which can't unwind
2715 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2716 ///   aborts the process.
2717 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2718 ///   affects various optimizations and codegen.
2719 ///
2720 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2721 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2722 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2723 /// might (from a foreign exception or similar).
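///
/// As a rough, illustrative summary of the `match` in the body (assuming the
/// `NEVER_UNWIND` attribute is absent):
///
/// ```text
/// abi          -Cpanic=unwind        -Cpanic=abort
/// "Rust"       true                  false
/// "C"          !feature(c_unwind)    false
/// "C-unwind"   true                  true
/// ```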
2724 #[inline]
2725 pub fn fn_can_unwind<'tcx>(
2726     tcx: TyCtxt<'tcx>,
2727     codegen_fn_attr_flags: CodegenFnAttrFlags,
2728     abi: SpecAbi,
2729 ) -> bool {
2730     // Special attribute for functions which can't unwind.
2731     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2732         return false;
2733     }
2734
2735 // Otherwise, if this isn't special, then unwinding is generally determined by
2736 // the ABI of the function itself. ABIs like `C` have variants which also
2737     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2738     // ABIs have such an option. Otherwise the only other thing here is Rust
2739     // itself, and those ABIs are determined by the panic strategy configured
2740     // for this compilation.
2741     //
2742     // Unfortunately at this time there's also another caveat. Rust [RFC
2743     // 2945][rfc] has been accepted and is in the process of being implemented
2744     // and stabilized. In this interim state we need to deal with historical
2745     // rustc behavior as well as plan for future rustc behavior.
2746     //
2747     // Historically functions declared with `extern "C"` were marked at the
2748 // codegen layer as `nounwind`, whether or not `panic=unwind` was
2749 // enabled. This is UB for functions in `panic=unwind` mode that then
2750     // actually panic and unwind. Note that this behavior is true for both
2751 // externally declared functions as well as Rust-defined functions.
2752     //
2753     // To fix this UB rustc would like to change in the future to catch unwinds
2754     // from function calls that may unwind within a Rust-defined `extern "C"`
2755     // function and forcibly abort the process, thereby respecting the
2756 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2757     // ready to roll out, so determining whether or not the `C` family of ABIs
2758     // unwinds is conditional not only on their definition but also whether the
2759     // `#![feature(c_unwind)]` feature gate is active.
2760     //
2761 // Note that this means that, unlike historical compilers, rustc now, by
2762     // default, unconditionally thinks that the `C` ABI may unwind. This will
2763     // prevent some optimization opportunities, however, so we try to scope this
2764     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2765     // to `panic=abort`).
2766     //
2767     // Eventually the check against `c_unwind` here will ideally get removed and
2768     // this'll be a little cleaner as it'll be a straightforward check of the
2769     // ABI.
2770     //
2771     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2772     use SpecAbi::*;
2773     match abi {
2774         C { unwind }
2775         | System { unwind }
2776         | Cdecl { unwind }
2777         | Stdcall { unwind }
2778         | Fastcall { unwind }
2779         | Vectorcall { unwind }
2780         | Thiscall { unwind }
2781         | Aapcs { unwind }
2782         | Win64 { unwind }
2783         | SysV64 { unwind } => {
2784             unwind
2785                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2786         }
2787         PtxKernel
2788         | Msp430Interrupt
2789         | X86Interrupt
2790         | AmdGpuKernel
2791         | EfiApi
2792         | AvrInterrupt
2793         | AvrNonBlockingInterrupt
2794         | CCmseNonSecureCall
2795         | Wasm
2796         | RustIntrinsic
2797         | PlatformIntrinsic
2798         | Unadjusted => false,
2799         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2800     }
2801 }
2802
2803 #[inline]
2804 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2805     use rustc_target::spec::abi::Abi::*;
2806     match tcx.sess.target.adjust_abi(abi) {
2807         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2808
2809         // It's the ABI's job to select this, not ours.
2810         System { .. } => bug!("system abi should be selected elsewhere"),
2811         EfiApi => bug!("eficall abi should be selected elsewhere"),
2812
2813         Stdcall { .. } => Conv::X86Stdcall,
2814         Fastcall { .. } => Conv::X86Fastcall,
2815         Vectorcall { .. } => Conv::X86VectorCall,
2816         Thiscall { .. } => Conv::X86ThisCall,
2817         C { .. } => Conv::C,
2818         Unadjusted => Conv::C,
2819         Win64 { .. } => Conv::X86_64Win64,
2820         SysV64 { .. } => Conv::X86_64SysV,
2821         Aapcs { .. } => Conv::ArmAapcs,
2822         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2823         PtxKernel => Conv::PtxKernel,
2824         Msp430Interrupt => Conv::Msp430Intr,
2825         X86Interrupt => Conv::X86Intr,
2826         AmdGpuKernel => Conv::AmdGpuKernel,
2827         AvrInterrupt => Conv::AvrInterrupt,
2828         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2829         Wasm => Conv::C,
2830
2831         // These API constants ought to be more specific...
2832         Cdecl { .. } => Conv::C,
2833     }
2834 }
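
// For example, `extern "system"` and `extern "efiapi"` are expected to have
// been resolved to a concrete ABI by `adjust_abi` before this match runs
// (hence the `bug!` arms above), while `extern "C"` maps straight to `Conv::C`.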
2835
2836 /// Error produced by attempting to compute or adjust a `FnAbi`.
2837 #[derive(Copy, Clone, Debug, HashStable)]
2838 pub enum FnAbiError<'tcx> {
2839     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2840     Layout(LayoutError<'tcx>),
2841
2842     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2843     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2844 }
2845
2846 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2847     fn from(err: LayoutError<'tcx>) -> Self {
2848         Self::Layout(err)
2849     }
2850 }
2851
2852 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2853     fn from(err: call::AdjustForForeignAbiError) -> Self {
2854         Self::AdjustForForeignAbi(err)
2855     }
2856 }
2857
2858 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2859     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2860         match self {
2861             Self::Layout(err) => err.fmt(f),
2862             Self::AdjustForForeignAbi(err) => err.fmt(f),
2863         }
2864     }
2865 }
2866
2867 // FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
2868 // just for error handling.
2869 #[derive(Debug)]
2870 pub enum FnAbiRequest<'tcx> {
2871     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2872     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2873 }
2874
2875 /// Trait for contexts that want to be able to compute `FnAbi`s.
2876 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2877 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2878     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2879     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2880     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2881
2882     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2883     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2884     ///
2885     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2886     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2887     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2888     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2889     fn handle_fn_abi_err(
2890         &self,
2891         err: FnAbiError<'tcx>,
2892         span: Span,
2893         fn_abi_request: FnAbiRequest<'tcx>,
2894     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2895 }
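
// As a hedged sketch (`MyInfallibleCx` is hypothetical, not part of this file):
// a context that wants plain `&FnAbi` results, the way codegen does, can pick
// `FnAbiOfResult = &FnAbi` (whose `MaybeResult` error type is `!`) and turn any
// `FnAbiError` into an ICE, assuming it also implements the `LayoutOfHelpers`
// supertrait:
//
// impl<'tcx> FnAbiOfHelpers<'tcx> for MyInfallibleCx<'tcx> {
//     type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
//
//     fn handle_fn_abi_err(
//         &self,
//         err: FnAbiError<'tcx>,
//         span: Span,
//         fn_abi_request: FnAbiRequest<'tcx>,
//     ) -> ! {
//         span_bug!(span, "failed to compute fn_abi: {}: {:?}", err, fn_abi_request)
//     }
// }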
2896
2897 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2898 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2899     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2900     ///
2901     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2902     /// instead, where the instance is an `InstanceDef::Virtual`.
2903     #[inline]
2904     fn fn_abi_of_fn_ptr(
2905         &self,
2906         sig: ty::PolyFnSig<'tcx>,
2907         extra_args: &'tcx ty::List<Ty<'tcx>>,
2908     ) -> Self::FnAbiOfResult {
2909         // FIXME(eddyb) get a better `span` here.
2910         let span = self.layout_tcx_at_span();
2911         let tcx = self.tcx().at(span);
2912
2913         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2914             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2915         ))
2916     }
2917
2918     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2919     /// direct calls to an `fn`.
2920     ///
2921     /// NB: that includes virtual calls, which are represented by "direct calls"
2922     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2923     #[inline]
2924     fn fn_abi_of_instance(
2925         &self,
2926         instance: ty::Instance<'tcx>,
2927         extra_args: &'tcx ty::List<Ty<'tcx>>,
2928     ) -> Self::FnAbiOfResult {
2929         // FIXME(eddyb) get a better `span` here.
2930         let span = self.layout_tcx_at_span();
2931         let tcx = self.tcx().at(span);
2932
2933         MaybeResult::from(
2934             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2935                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2936                 // we can get some kind of span even if one wasn't provided.
2937                 // However, we don't do this early in order to avoid calling
2938                 // `def_span` unconditionally (which may have a perf penalty).
2939                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2940                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2941             }),
2942         )
2943     }
2944 }
2945
2946 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
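
// (Hypothetical usage sketch.) Given the blanket impl above, any context `cx`
// implementing `FnAbiOfHelpers` can then write, e.g.:
//
// let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());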
2947
2948 fn fn_abi_of_fn_ptr<'tcx>(
2949     tcx: TyCtxt<'tcx>,
2950     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2951 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2952     let (param_env, (sig, extra_args)) = query.into_parts();
2953
2954     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2955         sig,
2956         extra_args,
2957         None,
2958         CodegenFnAttrFlags::empty(),
2959         false,
2960     )
2961 }
2962
2963 fn fn_abi_of_instance<'tcx>(
2964     tcx: TyCtxt<'tcx>,
2965     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2966 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2967     let (param_env, (instance, extra_args)) = query.into_parts();
2968
2969     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
2970
2971     let caller_location = if instance.def.requires_caller_location(tcx) {
2972         Some(tcx.caller_location_ty())
2973     } else {
2974         None
2975     };
2976
2977     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2978
2979     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2980         sig,
2981         extra_args,
2982         caller_location,
2983         attrs,
2984         matches!(instance.def, ty::InstanceDef::Virtual(..)),
2985     )
2986 }
2987
2988 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
2989     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
2990     // arguments of this method, into a separate `struct`.
2991     fn fn_abi_new_uncached(
2992         &self,
2993         sig: ty::PolyFnSig<'tcx>,
2994         extra_args: &[Ty<'tcx>],
2995         caller_location: Option<Ty<'tcx>>,
2996         codegen_fn_attr_flags: CodegenFnAttrFlags,
2997         // FIXME(eddyb) replace this with something typed, like an `enum`.
2998         force_thin_self_ptr: bool,
2999     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3000         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3001
3002         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3003
3004         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3005
3006         let mut inputs = sig.inputs();
3007         let extra_args = if sig.abi == RustCall {
3008             assert!(!sig.c_variadic && extra_args.is_empty());
3009
3010             if let Some(input) = sig.inputs().last() {
3011                 if let ty::Tuple(tupled_arguments) = input.kind() {
3012                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3013                     tupled_arguments
3014                 } else {
3015                     bug!(
3016                         "argument to function with \"rust-call\" ABI \
3017                             is not a tuple"
3018                     );
3019                 }
3020             } else {
3021                 bug!(
3022                     "function with \"rust-call\" ABI \
3023                         has no arguments, but a trailing tuple is required"
3024                 );
3025             }
3026         } else {
3027             assert!(sig.c_variadic || extra_args.is_empty());
3028             extra_args
3029         };
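
        // For example, an `extern "rust-call"` signature with inputs
        // `[&self, (A, B)]` ends up with `inputs == [&self]` and the tuple
        // untupled into `extra_args == [A, B]`, one ABI argument per element.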
3030
3031         let target = &self.tcx.sess.target;
3032         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3033         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3034         let linux_s390x_gnu_like =
3035             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3036         let linux_sparc64_gnu_like =
3037             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3038         let linux_powerpc_gnu_like =
3039             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3040         use SpecAbi::*;
3041         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3042
3043         // Handle safe Rust thin and fat pointers.
3044         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3045                                       scalar: Scalar,
3046                                       layout: TyAndLayout<'tcx>,
3047                                       offset: Size,
3048                                       is_return: bool| {
3049             // Booleans are always a noundef i1 that needs to be zero-extended.
3050             if scalar.is_bool() {
3051                 attrs.ext(ArgExtension::Zext);
3052                 attrs.set(ArgAttribute::NoUndef);
3053                 return;
3054             }
3055
3056             // Scalars which have invalid values cannot be undef.
3057             if !scalar.is_always_valid(self) {
3058                 attrs.set(ArgAttribute::NoUndef);
3059             }
3060
3061             // Only pointer types are handled below.
3062             if scalar.value != Pointer {
3063                 return;
3064             }
3065
3066             if !scalar.valid_range.contains(0) {
3067                 attrs.set(ArgAttribute::NonNull);
3068             }
3069
3070             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3071                 if let Some(kind) = pointee.safe {
3072                     attrs.pointee_align = Some(pointee.align);
3073
3074                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
3075                     // for the entire duration of the function as they can be deallocated
3076                     // at any time. Set their valid size to 0.
3077                     attrs.pointee_size = match kind {
3078                         PointerKind::UniqueOwned => Size::ZERO,
3079                         _ => pointee.size,
3080                     };
3081
3082                     // `Box`, `&T`, and `&mut T` cannot be undef.
3083                     // Note that this only applies to the value of the pointer itself;
3084                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3085                     attrs.set(ArgAttribute::NoUndef);
3086
3087                     // `Box` pointer parameters never alias because ownership is transferred.
3088                     // `&mut` pointer parameters never alias other parameters
3089                     // or mutable global data.
3090                     //
3091                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3092                     // and can be marked as both `readonly` and `noalias`, as
3093                     // LLVM's definition of `noalias` is based solely on memory
3094                     // dependencies rather than pointer equality
3095                     //
3096                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3097                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3098                     // or not to actually emit the attribute. It can also be controlled with the
3099                     // `-Zmutable-noalias` debugging option.
3100                     let no_alias = match kind {
3101                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3102                         PointerKind::UniqueOwned => true,
3103                         PointerKind::Frozen => !is_return,
3104                     };
3105                     if no_alias {
3106                         attrs.set(ArgAttribute::NoAlias);
3107                     }
3108
3109                     if kind == PointerKind::Frozen && !is_return {
3110                         attrs.set(ArgAttribute::ReadOnly);
3111                     }
3112
3113                     if kind == PointerKind::UniqueBorrowed && !is_return {
3114                         attrs.set(ArgAttribute::NoAliasMutRef);
3115                     }
3116                 }
3117             }
3118         };
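
        // Rough summary of the attribute choices above in optimized builds
        // (`noundef`/`nonnull` and pointee size/align attributes elided):
        //   `&T` where `T: Freeze`    -> `noalias` + `readonly` (except returns)
        //   `&mut T` where `T: Unpin` -> `NoAliasMutRef` (except returns)
        //   `Box<T>`                  -> `noalias`, with pointee size forced to 0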
3119
3120         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3121             let is_return = arg_idx.is_none();
3122
3123             let layout = self.layout_of(ty)?;
3124             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3125                 // Don't pass the vtable, it's not an argument of the virtual fn.
3126                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3127                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3128                 make_thin_self_ptr(self, layout)
3129             } else {
3130                 layout
3131             };
3132
3133             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3134                 let mut attrs = ArgAttributes::new();
3135                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3136                 attrs
3137             });
3138
3139             if arg.layout.is_zst() {
3140                 // For some forsaken reason, x86_64-pc-windows-gnu
3141                 // doesn't ignore zero-sized struct arguments.
3142                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3143                 if is_return
3144                     || rust_abi
3145                     || (!win_x64_gnu
3146                         && !linux_s390x_gnu_like
3147                         && !linux_sparc64_gnu_like
3148                         && !linux_powerpc_gnu_like)
3149                 {
3150                     arg.mode = PassMode::Ignore;
3151                 }
3152             }
3153
3154             Ok(arg)
3155         };
3156
3157         let mut fn_abi = FnAbi {
3158             ret: arg_of(sig.output(), None)?,
3159             args: inputs
3160                 .iter()
3161                 .copied()
3162                 .chain(extra_args.iter().copied())
3163                 .chain(caller_location)
3164                 .enumerate()
3165                 .map(|(i, ty)| arg_of(ty, Some(i)))
3166                 .collect::<Result<_, _>>()?,
3167             c_variadic: sig.c_variadic,
3168             fixed_count: inputs.len(),
3169             conv,
3170             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3171         };
3172         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3173         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3174         Ok(self.tcx.arena.alloc(fn_abi))
3175     }
3176
3177     fn fn_abi_adjust_for_abi(
3178         &self,
3179         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3180         abi: SpecAbi,
3181     ) -> Result<(), FnAbiError<'tcx>> {
3182         if abi == SpecAbi::Unadjusted {
3183             return Ok(());
3184         }
3185
3186         if abi == SpecAbi::Rust
3187             || abi == SpecAbi::RustCall
3188             || abi == SpecAbi::RustIntrinsic
3189             || abi == SpecAbi::PlatformIntrinsic
3190         {
3191             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3192                 if arg.is_ignore() {
3193                     return;
3194                 }
3195
3196                 match arg.layout.abi {
3197                     Abi::Aggregate { .. } => {}
3198
3199                     // This is a fun case! The gist of what this is doing is
3200                     // that we want callers and callees to always agree on the
3201                     // ABI of how they pass SIMD arguments. If we were to *not*
3202                     // make these arguments indirect then they'd be immediates
3203                     // in LLVM, which means that they'd use whatever the
3204                     // appropriate ABI is for the callee and the caller. That
3205                     // means, for example, if the caller doesn't have AVX
3206                     // enabled but the callee does, then passing an AVX argument
3207                     // across this boundary would cause corrupt data to show up.
3208                     //
3209                     // This problem is fixed by unconditionally passing SIMD
3210                     // arguments through memory between callers and callees
3211                     // which should get them all to agree on ABI regardless of
3212                     // target feature sets. Some more information about this
3213                     // issue can be found in #44367.
3214                     //
3215                     // Note that the platform intrinsic ABI is exempt here as
3216                     // that's how we connect up to LLVM and it's unstable
3217                     // anyway; we control all calls to it in libstd.
3218                     Abi::Vector { .. }
3219                         if abi != SpecAbi::PlatformIntrinsic
3220                             && self.tcx.sess.target.simd_types_indirect =>
3221                     {
3222                         arg.make_indirect();
3223                         return;
3224                     }
3225
3226                     _ => return,
3227                 }
3228
3229                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
3230                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
3231                 let max_by_val_size = Pointer.size(self) * 2;
3232                 let size = arg.layout.size;
3233
3234                 if arg.layout.is_unsized() || size > max_by_val_size {
3235                     arg.make_indirect();
3236                 } else {
3237                     // We want to pass small aggregates as immediates, but using
3238                     // an LLVM aggregate type for this leads to bad optimizations,
3239                     // so we pick an appropriately sized integer type instead.
3240                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3241                 }
3242             };
3243             fixup(&mut fn_abi.ret);
3244             for arg in &mut fn_abi.args {
3245                 fixup(arg);
3246             }
3247         } else {
3248             fn_abi.adjust_for_foreign_abi(self, abi)?;
3249         }
3250
3251         Ok(())
3252     }
3253 }
3254
3255 fn make_thin_self_ptr<'tcx>(
3256     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3257     layout: TyAndLayout<'tcx>,
3258 ) -> TyAndLayout<'tcx> {
3259     let tcx = cx.tcx();
3260     let fat_pointer_ty = if layout.is_unsized() {
3261         // unsized `self` is passed as a pointer to `self`
3262         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3263         tcx.mk_mut_ptr(layout.ty)
3264     } else {
3265         match layout.abi {
3266             Abi::ScalarPair(..) => (),
3267             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3268         }
3269
3270         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3271         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3272         // elsewhere in the compiler as a method on a `dyn Trait`.
3273         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3274         // get a built-in pointer type
3275         let mut fat_pointer_layout = layout;
3276         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3277             && !fat_pointer_layout.ty.is_region_ptr()
3278         {
3279             for i in 0..fat_pointer_layout.fields.count() {
3280                 let field_layout = fat_pointer_layout.field(cx, i);
3281
3282                 if !field_layout.is_zst() {
3283                     fat_pointer_layout = field_layout;
3284                     continue 'descend_newtypes;
3285                 }
3286             }
3287
3288             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3289         }
3290
3291         fat_pointer_layout.ty
3292     };
3293
3294     // we now have a type like `*mut RcBox<dyn Trait>`
3295     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3296     // this is understood as a special case elsewhere in the compiler
3297     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3298
3299     TyAndLayout {
3300         ty: fat_pointer_ty,
3301
3302         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3303         // should always work because the type is always `*mut ()`.
3304         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3305     }
3306 }